2541.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
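Stripped of the PolyBench harness, the kernel above is three chained matrix products, G = (A*B)*(C*D). A minimal standalone sketch of the same pattern (sizes and initialization are illustrative, not the benchmark's defaults), handy as a sanity check:

/* Minimal standalone sketch of the 3mm pattern above: G = (A*B)*(C*D).
   N is a hypothetical common dimension chosen for brevity. */
#include <stdio.h>
#include <stdlib.h>

#define N 64

static void matmul(int n, double *X, double *Y, double *Z) {
    /* Z = X*Y, all n-by-n, row-major */
    #pragma omp parallel for
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            double acc = 0.0;
            for (int k = 0; k < n; k++)
                acc += X[i*n + k] * Y[k*n + j];
            Z[i*n + j] = acc;
        }
}

int main(void) {
    double *A = malloc(N*N*sizeof *A), *B = malloc(N*N*sizeof *B);
    double *C = malloc(N*N*sizeof *C), *D = malloc(N*N*sizeof *D);
    double *E = malloc(N*N*sizeof *E), *F = malloc(N*N*sizeof *F);
    double *G = malloc(N*N*sizeof *G);
    for (int i = 0; i < N*N; i++) { A[i] = B[i] = C[i] = D[i] = 1.0; }
    matmul(N, A, B, E);   /* E := A*B */
    matmul(N, C, D, F);   /* F := C*D */
    matmul(N, E, F, G);   /* G := E*F */
    printf("G[0][0] = %g (expect %g)\n", G[0], (double)N * N * N);
    free(A); free(B); free(C); free(D); free(E); free(F); free(G);
    return 0;
}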
api_calls_misc.c
// RUN: %libomp-compile && %libomp-run | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { #pragma omp parallel num_threads(1) { // ompt_get_callback() ompt_callback_t callback; ompt_get_callback(ompt_callback_thread_begin, &callback); printf("%" PRIu64 ": &on_ompt_callback_thread_begin=%p\n", ompt_get_thread_data()->value, &on_ompt_callback_thread_begin); printf("%" PRIu64 ": ompt_get_callback() result=%p\n", ompt_get_thread_data()->value, callback); // ompt_get_state() printf("%" PRIu64 ": ompt_get_state()=%d\n", ompt_get_thread_data()->value, ompt_get_state(NULL)); // ompt_enumerate_states() int state = omp_state_undefined; const char *state_name; int steps = 0; while (ompt_enumerate_states(state, &state, &state_name) && steps < 1000) { steps++; if (!state_name) printf("%" PRIu64 ": state_name is NULL\n", ompt_get_thread_data()->value); } if (steps >= 1000) { // enumeration did not end after 1000 steps printf("%" PRIu64 ": states enumeration did not end\n", ompt_get_thread_data()->value); } // ompt_enumerate_mutex_impls() int impl = ompt_mutex_impl_unknown; const char *impl_name; steps = 0; while (ompt_enumerate_mutex_impls(impl, &impl, &impl_name) && steps < 1000) { steps++; if (!impl_name) printf("%" PRIu64 ": impl_name is NULL\n", ompt_get_thread_data()->value); } if (steps >= 1000) { // enumeration did not end after 1000 steps printf("%" PRIu64 ": mutex_impls enumeration did not end\n", ompt_get_thread_data()->value); } } // Check if libomp supports the callbacks for this test. // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: &on_ompt_callback_thread_begin // CHECK-SAME: =[[FUNCTION_POINTER:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: ompt_get_callback() result=[[FUNCTION_POINTER]] // CHECK: {{^}}[[THREAD_ID]]: ompt_get_state()=1 // CHECK-NOT: {{^}}[[THREAD_ID]]: state_name is NULL // CHECK-NOT: {{^}}[[THREAD_ID]]: states enumeration did not end // CHECK-NOT: {{^}}[[THREAD_ID]]: impl_name is NULL // CHECK-NOT: {{^}}[[THREAD_ID]]: mutex_impls enumeration did not end return 0; }
GB_unaryop__minv_fp32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_int8 // op(A') function: GB_tran__minv_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_int8 ( float *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
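For context, this generated worker is what SuiteSparse dispatches to from the public API when GBCOMPACT is not defined. A hedged usage sketch, assuming the standard GraphBLAS C interface (GrB_apply with the built-in GrB_MINV_FP32 operator applied to an int8 matrix; link with -lgraphblas, error handling elided):

/* Sketch: z = 1/x with typecast int8 -> float via GrB_apply. */
#include <GraphBLAS.h>
#include <stdio.h>

int main(void) {
    GrB_init(GrB_NONBLOCKING);
    GrB_Matrix A, C;
    GrB_Matrix_new(&A, GrB_INT8, 2, 2);
    GrB_Matrix_new(&C, GrB_FP32, 2, 2);
    GrB_Matrix_setElement_INT8(A, 4, 0, 0);
    GrB_Matrix_setElement_INT8(A, 2, 1, 1);
    /* C = minv(A): each int8 entry is cast to float, then inverted */
    GrB_Matrix_apply(C, NULL, NULL, GrB_MINV_FP32, A, NULL);
    float x;
    GrB_Matrix_extractElement_FP32(&x, C, 0, 0);
    printf("C(0,0) = %g\n", x);   /* 0.25 */
    GrB_Matrix_free(&A); GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}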
lapl_cyl.h
#pragma once #include "sparse.h" #include "tensor.h" #include "fft.h" #include "asp_misc.h" #include "verify.h" namespace fdm { struct LaplCyl3Data { const double dr, dz, dphi; const double dr2, dz2, dphi2; const double r0, lr, lz; const double slz; const int zpoints; const int nr, nz, nphi; const int z1, zn; LaplCyl3Data(double dr, double dz, double r0, double lr, double lz, int nr, int nz, int nphi, tensor_flag zflag = tensor_flag::none) : dr(dr), dz(dz), dphi(2*M_PI/nphi) , dr2(dr*dr), dz2(dz*dz), dphi2(dphi*dphi) , r0(r0), lr(lr), lz(lz), slz(sqrt(2./lz)) , zpoints(zflag==tensor_flag::none?nz+1:nz) , nr(nr), nz(nz), nphi(nphi) , z1(zflag==tensor_flag::none?1:0) , zn(zflag==tensor_flag::none?nz:nz-1) { } }; template<typename T, template<typename> class Solver, bool check> class LaplCyl3FFT1: public LaplCyl3Data { public: constexpr static T SQRT_M_1_PI = 0.56418958354775629; using tensor_flags = fdm::tensor_flags<tensor_flag::periodic>; using tensor = fdm::tensor<T,3,check,tensor_flags>; std::array<int,6> indices; tensor RHS, ANS, RHSm, RHSx; std::vector<T> s, S; // strange std::vector<Solver<T>> solver; FFTTable<T> ft_table; std::vector<FFT<T>> ft; LaplCyl3FFT1(double dr, double dz, double r0, double lr, double lz, int nr, int nz, int nphi) : LaplCyl3Data(dr, dz, r0, lr, lz, nr, nz, nphi) , indices({0, nphi-1, 1, nz, 1, nr}) , RHS(indices), ANS(indices), RHSm(indices), RHSx(indices) , s((nz+1)*nphi), S((nz+1)*nphi) , solver(nphi) , ft_table(nphi) { init_solver(); } ~LaplCyl3FFT1() { } void solve(T* ans, T* rhs) { RHS.use(rhs); ANS.use(ans); #pragma omp parallel for for (int k = 1; k <= nz; k++) { for (int j = 1; j <= nr; j++) { for (int i = 0; i < nphi; i++) { s[k*nphi+i] = RHS[i][k][j]; } ft[k].pFFT_1(&S[k*nphi], &s[k*nphi], dphi*SQRT_M_1_PI); for (int i = 0; i < nphi; i++) { RHSm[i][k][j] = S[k*nphi+i]; } } } #pragma omp parallel for for (int i = 0; i < nphi; i++) { // solve the nphi systems solver[i].solve(&RHSx[i][1][1], &RHSm[i][1][1]); } #pragma omp parallel for for (int k = 1; k <= nz; k++) { for (int j = 1; j <= nr; j++) { for (int i = 0; i < nphi; i++) { s[k*nphi+i] = RHSx[i][k][j]; } ft[k].pFFT(&S[k*nphi], &s[k*nphi], SQRT_M_1_PI); for (int i = 0; i < nphi; i++) { ANS[i][k][j] = S[k*nphi+i]; } } } } private: void init_solver() { ft.reserve(nz+1); for (int i = 0; i < nphi; i++) { init_solver(i); } for (int k = 0; k <= nz; k++) { ft.emplace_back(ft_table, nphi); } } void init_solver(int i) { fdm::tensor<T,2,check> RHS_phi({1,nz,1,nr}); csr_matrix<T> P_phi; T lm = 4.0/dphi2*asp::sq(sin(i*dphi*0.5)); for (int k = 1; k <= nz; k++) { for (int j = 1; j <= nr; j++) { int id = RHS_phi.index({k,j}); double r = r0+j*dr; if (k > 1) { P_phi.add(id, RHS_phi.index({k-1,j}), 1/dz2); } if (j > 1) { P_phi.add(id, RHS_phi.index({k,j-1}), (r-0.5*dr)/dr2/r); } P_phi.add(id, RHS_phi.index({k,j}), -2/dr2-2/dz2-lm/r/r); if (j < nr) { P_phi.add(id, RHS_phi.index({k,j+1}), (r+0.5*dr)/dr2/r); } if (k < nz) { P_phi.add(id, RHS_phi.index({k+1,j}), 1/dz2); } } } P_phi.close(); P_phi.sort_rows(); solver[i] = std::move(P_phi); } }; template<typename T, bool check, tensor_flag zflag=tensor_flag::none> class LaplCyl3FFT2: public LaplCyl3Data { public: constexpr static T SQRT_M_1_PI = 0.56418958354775629; using tensor_flags = fdm::tensor_flags<tensor_flag::periodic,zflag>; using tensor = fdm::tensor<T,3,check,tensor_flags>; std::array<int,6> indices; tensor RHS, ANS, RHSm; int lds; std::vector<T> s, S; FFTTable<T> ft_phi_table; FFTTable<T> ft_z_table_; FFTTable<T>* ft_z_table; std::vector<FFT<T>> ft_phi;
std::vector<FFT<T>> ft_z_; std::vector<FFT<T>>& ft_z; std::vector<T> lm_phi, lm_z; fdm::tensor<T,3,check> matrices; fdm::tensor<int,3,check> ipivs; /** \param nr,nz,nphi - number of cells in the corresponding directions \param dr,dz - cell sizes \param r0 - coordinate of the first point along the r axis (including the point beyond the domain boundary for staggered grids) \param lr, lz - distance from the first point to the last one (including the points beyond the boundaries). For staggered grids lr=nr*dr+dr. For regular grids lr=nr*dr. */ LaplCyl3FFT2(double dr, double dz, double r0, double lr, double lz, int nr, int nz, int nphi) : LaplCyl3Data(dr, dz, r0, lr, lz, nr, nz, nphi, zflag) , indices({0, nphi-1, z1, zn, 1, nr}) , RHS(indices), ANS(indices), RHSm(indices) , s(zpoints*nphi), S(zpoints*nphi) , ft_phi_table(nphi) , ft_z_table_(nphi == zpoints ? 1 : zpoints) , ft_z_table(nphi == zpoints ? &ft_phi_table : &ft_z_table_) , ft_z(nphi == zpoints ? ft_phi : ft_z_) , lm_phi(nphi), lm_z(zpoints) , matrices({0,nphi-1,z1,zn,0,4*nr-1}) , ipivs({0,nphi-1,z1,zn,0,nr-1}) { init_solver(); } ~LaplCyl3FFT2() { } void solve(T* ans, T* rhs); private: void init_solver(); }; template<typename T, template<typename> class Solver, bool check> class LaplCyl3Simple { public: const double R, r0; const double h1, h2; const int nr, nz, nphi; const double dr, dz, dphi; const double dr2, dz2, dphi2; Solver<T> solver; LaplCyl3Simple(double R, double r0, double h1, double h2, int nr, int nz, int nphi) : R(R), r0(r0) , h1(h1), h2(h2) , nr(nr), nz(nz), nphi(nphi) , dr((R-r0)/nr), dz((h2-h1)/nz), dphi(2*M_PI/nphi) , dr2(dr*dr), dz2(dz*dz), dphi2(dphi*dphi) { init_solver(); } void solve(T* ans, T* rhs) { using tensor_flags = fdm::tensor_flags<tensor_flag::periodic>; std::array<int,6> indices = {0, nphi-1, 1, nz, 1, nr}; tensor<T,3,check,tensor_flags> RHS(indices, rhs); tensor<T,3,check,tensor_flags> ANS(indices, ans); solver.solve(&ANS[0][1][1], &RHS[0][1][1]); } private: void init_solver() { using tensor_flags = fdm::tensor_flags<tensor_flag::periodic>; std::array<int,6> indices = {0, nphi-1, 1, nz, 1, nr}; tensor<T,3,check,tensor_flags> RHS(indices); csr_matrix<T> P; for (int i = 0; i < nphi; i++) { for (int k = 1; k <= nz; k++) { for (int j = 1; j <= nr; j++) { int id = RHS.index({i,k,j}); double r = r0+j*dr-dr/2; double r2 = r*r; //double rm1 = (r-0.5*dr)/r; //double rm2 = (r+0.5*dr)/r; double rm = 0; double zm = 0; /*if (j > 1) { rm += rm1; } if (j < nr) { rm += rm2; } if (k > 1) { zm += 1; } if (k < nz) { zm += 1; }*/ zm = 2; rm = 2; P.add(id, RHS.index({i-1,k,j}), 1/dphi2/r2); if (k > 1) { P.add(id, RHS.index({i,k-1,j}), 1/dz2); } if (j > 1) { P.add(id, RHS.index({i,k,j-1}), (r-0.5*dr)/dr2/r); } P.add(id, RHS.index({i,k,j}), -rm/dr2-zm/dz2-2/dphi2/r2); if (j < nr) { P.add(id, RHS.index({i,k,j+1}), (r+0.5*dr)/dr2/r); } if (k < nz) { P.add(id, RHS.index({i,k+1,j}), 1/dz2); } } } } P.close(); P.sort_rows(); solver = std::move(P); } }; }
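The FFT step in LaplCyl3FFT1::solve works because the periodic second difference in phi is diagonalized by the Fourier basis, with exactly the eigenvalue lm = 4/dphi^2 * sin^2(i*dphi/2) used in init_solver; after the transform, each Fourier mode i yields an independent 2D (r,z) system. A small plain-C check of that identity, independent of the fdm classes:

/* Verify: for v_j = cos(2*pi*i*j/n), the periodic second difference
   (v[j-1] - 2*v[j] + v[j+1]) / dphi^2 equals -lambda_i * v_j,
   with lambda_i = 4/dphi^2 * sin^2(i*dphi/2). */
#include <math.h>
#include <stdio.h>

int main(void) {
    int n = 16, i = 3;                    /* illustrative mode and grid size */
    double dphi = 2.0 * M_PI / n;
    double lam = 4.0 / (dphi * dphi) * pow(sin(i * dphi * 0.5), 2);
    for (int j = 0; j < n; j++) {
        double vm = cos(2.0 * M_PI * i * (j - 1) / n);
        double v0 = cos(2.0 * M_PI * i * j / n);
        double vp = cos(2.0 * M_PI * i * (j + 1) / n);
        double lhs = (vm - 2.0 * v0 + vp) / (dphi * dphi);
        printf("j=%2d  residual=%.3e\n", j, lhs + lam * v0);  /* ~0 */
    }
    return 0;
}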
mm_parallel.c
/****************************************************************************** * FILE: mm_parallel.c * DESCRIPTION: Matrix Multiplication using OpenMP. * AUTHOR: R.S.Pissardini * Compilation: gcc mm_parallel.c -o multp -lm -fopenmp * Execution: OMP_NUM_THREADS=<p> ./multp <n> where <p> is the number of threads * and <n> is the size of the matrix. ******************************************************************************/ #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <omp.h> #include <sys/time.h> int main(int argc, char **argv) { long **a, **b, **c; int N = 100; if (argc == 2) { N = atoi (argv[1]); assert (N > 0); } int i,j,k,mul=5; int col_sum = N * (N-1) / 2; a = (long **)malloc (N * sizeof(long *)); b = (long **)malloc (N * sizeof(long *)); c = (long **)malloc (N * sizeof(long *)); for (i=0; i<N; i++) { a[i] = (long *)malloc (N * sizeof(long)); b[i] = (long *)malloc (N * sizeof(long)); c[i] = (long *)malloc (N * sizeof(long)); } double start = omp_get_wtime(); /*****Parallel Region************/ #pragma omp parallel shared (a,b,c) private (i,j,k) { #pragma omp for schedule (static) for (i=0; i<N; i++) for (j=0; j<N; j++) { a[i][j] = i*mul; b[i][j] = i; c[i][j] = 0; } #pragma omp single printf ("Matrix generation finished.\n"); #pragma omp for schedule (static) for (i=0; i<N; i++) for (j=0; j<N; j++) for (k=0; k<N; k++) c[i][j] += a[i][k] * b[k][j]; #pragma omp single printf ("Multiplication finished.\n"); #pragma omp for schedule (static) for (i=0; i<N; i++) for (j=0; j<N; j++) assert ( c[i][j] == i*mul * col_sum); } /*****End of Parallel Region **********/ double end = omp_get_wtime(); printf("%.16g (s)\n", end - start); printf ("Test finished.\n"); return 0; }
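The i-j-k inner loop above strides through b column-wise, which is cache-unfriendly for row-major arrays. A common alternative reorders to i-k-j so the innermost loop walks b[k][] and c[i][] contiguously; a drop-in sketch for the multiplication phase (same variables and pragma style as the file above, same result since c starts zeroed):

/* i-k-j ordering: innermost accesses are contiguous in b and c. */
#pragma omp for schedule (static)
for (i = 0; i < N; i++)
    for (k = 0; k < N; k++) {
        long aik = a[i][k];
        for (j = 0; j < N; j++)
            c[i][j] += aik * b[k][j];
    }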
simulation.c
/******************************************************************************* * Copyright (C) 2015-2019 Commissariat a l'energie atomique et aux energies alternatives (CEA) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. ******************************************************************************/ #include <mpi.h> #include <omp.h> #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <paraconf.h> // load the PDI header #include <pdi.h> /// size of the local data as [HEIGHT, WIDTH] including ghosts & boundary constants int dsize[2]; /// 2D size of the process grid as [HEIGHT, WIDTH] int psize[2]; /// 2D rank of the local process in the process grid as [YY, XX] int pcoord[2]; /** Initialize the data all to 0 except for the left border (XX==0) initialized to 1 million * \param[out] dat the local data to initialize */ void init(double dat[dsize[0]][dsize[1]]) { for (int yy=0; yy<dsize[0]; ++yy) for (int xx=0; xx<dsize[1]; ++xx) dat[yy][xx] = 0; if ( pcoord[1] == 0 ) for (int yy=0; yy<dsize[0]; ++yy) dat[yy][0] = 1000000; } /** Compute the values at the next time-step based on the values at the current time-step * \param[in] cur the local data at the current time-step * \param[out] next the local data at the next time-step */ void iter(int dsize[2], double cur[dsize[0]][dsize[1]], double next[dsize[0]][dsize[1]]) { for (int xx=0; xx<dsize[1]; ++xx) { next[0][xx] = cur[0][xx]; } #pragma omp parallel for for (int yy=1; yy<dsize[0]-1; ++yy) { next[yy][0] = cur[yy][0]; for (int xx=1; xx<dsize[1]-1; ++xx) { next[yy][xx] = (cur[yy][xx] *.5) + (cur[yy][xx-1] *.125) + (cur[yy][xx+1] *.125) + (cur[yy-1][xx] *.125) + (cur[yy+1][xx] *.125); } next[yy][dsize[1]-1] = cur[yy][dsize[1]-1]; } for (int xx=0; xx<dsize[1]; ++xx) { next[dsize[0]-1][xx] = cur[dsize[0]-1][xx]; } } /** Exchanges ghost values with neighbours * \param[in] cart_comm the MPI communicator with all processes organized in a 2D Cartesian grid * \param[in] cur the local data at the current time-step whose ghosts need exchanging */ void exchange(MPI_Comm cart_comm, double cur[dsize[0]][dsize[1]]) { MPI_Status status; int rank_source, rank_dest; static MPI_Datatype column, row; static int initialized = 0; if ( !initialized ) { MPI_Type_vector(dsize[0]-2, 1, dsize[1], MPI_DOUBLE, &column); MPI_Type_commit(&column); MPI_Type_contiguous(dsize[1]-2, MPI_DOUBLE, &row); MPI_Type_commit(&row); initialized = 1; } // send down MPI_Cart_shift(cart_comm, 0, 1, &rank_source, &rank_dest);
MPI_Sendrecv(&cur[dsize[0]-2][1], 1, row, rank_dest, 100, // send row before ghost &cur[0][1], 1, row, rank_source, 100, // receive 1st row (ghost) cart_comm, &status); // send up MPI_Cart_shift(cart_comm, 0, -1, &rank_source, &rank_dest); MPI_Sendrecv(&cur[1][1], 1, row, rank_dest, 100, // send row after ghost &cur[dsize[0]-1][1], 1, row, rank_source, 100, // receive last row (ghost) cart_comm, &status); // send to the right MPI_Cart_shift(cart_comm, 1, 1, &rank_source, &rank_dest); MPI_Sendrecv(&cur[1][dsize[1]-2], 1, column, rank_dest, 100, // send column before ghost &cur[1][0], 1, column, rank_source, 100, // receive 1st column (ghost) cart_comm, &status); // send to the left MPI_Cart_shift(cart_comm, 1, -1, &rank_source, &rank_dest); MPI_Sendrecv(&cur[1][1], 1, column, rank_dest, 100, // send column after ghost &cur[1][dsize[1]-1], 1, column, rank_source, 100, // receive last column (ghost) cart_comm, &status); } int main( int argc, char* argv[] ) { MPI_Init(&argc, &argv); // load the configuration tree PC_tree_t conf = PC_parse_path("simulation.yml"); // NEVER USE MPI_COMM_WORLD IN THE CODE, use our own communicator main_comm instead MPI_Comm main_comm = MPI_COMM_WORLD; // load the MPI rank & size int psize_1d; MPI_Comm_size(main_comm, &psize_1d); int pcoord_1d; MPI_Comm_rank(main_comm, &pcoord_1d); long longval; // load the global data-size int global_size[2]; PC_int(PC_get(conf, ".global_size.height"), &longval); global_size[0] = longval; PC_int(PC_get(conf, ".global_size.width"), &longval); global_size[1] = longval; // load the parallelism configuration PC_int(PC_get(conf, ".parallelism.height"), &longval); psize[0] = longval; PC_int(PC_get(conf, ".parallelism.width" ), &longval); psize[1] = longval; // load the generation configuration long generations ; PC_int(PC_get(conf, ".MaxtimeSteps" ), &generations); PDI_init(PC_get(conf, ".pdi")); // check the configuration is coherent assert(global_size[0]%psize[0]==0); assert(global_size[1]%psize[1]==0); assert(psize[1]*psize[0] == psize_1d); // compute the local data-size with space for ghosts and boundary constants dsize[0] = global_size[0]/psize[0] + 2; dsize[1] = global_size[1]/psize[1] + 2; // create a 2D Cartesian MPI communicator & get our coordinate (rank) in it int cart_period[2] = { 0, 0 }; MPI_Comm cart_comm; MPI_Cart_create(main_comm, 2, psize, cart_period, 1, &cart_comm); MPI_Cart_coords(cart_comm, pcoord_1d, 2, pcoord); // allocate memory for the double-buffered data double(*cur)[dsize[1]] = malloc(sizeof(double)*dsize[1]*dsize[0]); double(*next)[dsize[1]] = malloc(sizeof(double)*dsize[1]*dsize[0]); // initialize the data content init(cur); // our loop counter so as to be able to use it outside the loop int ii=0; // share useful configuration bits with PDI PDI_multi_expose("init", "pcoord", pcoord, PDI_OUT, "pcoord_1d", &pcoord_1d, PDI_OUT, "dsize", dsize, PDI_OUT, "psize", psize, PDI_OUT, "timestep", &ii, PDI_OUT, "MaxtimeSteps", &generations, PDI_OUT, NULL); // the main loop for (; ii<generations; ++ii) { PDI_multi_expose("Available", "timestep", &ii, PDI_OUT, "local_t", cur, PDI_OUT, NULL); // compute the values for the next iteration iter(dsize, cur, next); // exchange data with the neighbours exchange(cart_comm, next); // swap the current and next values double (*tmp)[dsize[1]] = cur; cur = next; next = tmp; MPI_Barrier(cart_comm); } // finalize PDI PDI_finalize(); // destroy the paraconf configuration tree PC_tree_destroy(&conf); // free the allocated memory free(cur);
free(next); // finalize MPI MPI_Finalize(); fprintf(stderr, "[%d] SUCCESS\n", pcoord_1d); return EXIT_SUCCESS; }
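The exchange() above relies on two derived datatypes: a strided MPI_Type_vector describing one interior column and an MPI_Type_contiguous describing one interior row, both of MPI_DOUBLE to match the double arrays. A self-contained sketch of just the column transfer (toy sizes; run with at least 2 ranks):

/* Send the interior column x=1 of a row-major h-by-w double array from
   rank 0 to rank 1 via an MPI_Type_vector, mirroring the 'column' type
   built in exchange() above. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, np;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    if (np < 2) { MPI_Finalize(); return 1; }   /* needs two ranks */
    enum { h = 6, w = 5 };
    double a[h][w];
    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
            a[y][x] = (rank == 0) ? 100.0 * y + x : -1.0;
    MPI_Datatype column;
    MPI_Type_vector(h - 2, 1, w, MPI_DOUBLE, &column); /* h-2 doubles, stride w */
    MPI_Type_commit(&column);
    if (rank == 0) {
        MPI_Send(&a[1][1], 1, column, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&a[1][1], 1, column, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("a[2][1] = %g (expect 201)\n", a[2][1]);
    }
    MPI_Type_free(&column);
    MPI_Finalize();
    return 0;
}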
hsrp_fmt_plug.c
/* * Cracker for MD5 authentication in HSRP, VRRP and GLBP. * http://www.rfc-editor.org/rfc/rfc1828.txt * * This is dedicated to Darya. You inspire me. * * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hsrp; #elif FMT_REGISTERS_H john_register_one(&fmt_hsrp); #else #include <string.h> #ifdef _OPENMP #include <omp.h> // OMP_SCALE tuned on core i7 4-core HT // 2048 - 8850k 6679k // 4096 - 10642k 7278k // 8192 - 10489k 7532k // 16k - 10413k 7694k // 32k - 12111k 7803k ** this value chosen // 64k - 12420k 6523k // 128k - 12220k 6741k #define OMP_SCALE 32768 #endif #include "arch.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "params.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "hsrp" #define FORMAT_NAME "\"MD5 authentication\" HSRP, VRRP, GLBP" #define FORMAT_TAG "$hsrp$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 55 // Must fit in a single MD5 block #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_SIZE sizeof(struct custom_salt) #define REAL_SALT_SIZE 50 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define HEXCHARS "0123456789abcdef" static struct fmt_tests tests[] = { {"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"}, {"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"}, {"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { int length; unsigned char salt[2048]; // be safe ;) } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_num_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q = NULL; int len; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; q = strrchr(ciphertext, '$'); if (!q || q+1==p) return 0; q = q + 1; // if ((q - p - 1) > REAL_SALT_SIZE * 2) // return 0; len = strspn(q, HEXCHARS); if (len != BINARY_SIZE * 2 || len != strlen(q)) return 0; if (strspn(p, HEXCHARS) != q - p - 1) return 0; if (q-p > (sizeof(cur_salt->salt)-1)*2) return 0; return 1; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; int i, len; memset(&cs, 0, SALT_SIZE); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; len = (strrchr(ciphertext, '$') - 
ciphertext) / 2; for (i = 0; i < len; i++) cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2 * i + 1])]; cs.length = len; return &cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } // this place would normally contain "print_hex" but I do not want to piss of magnum (yet again) #define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val) static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { uint32_t block[16] = { 0 }; int len = saved_len[index]; MD5_CTX ctx; MD5_Init(&ctx); // key + keyfill memcpy(block, saved_key[index], len); PUTCHAR(block, len, 0x80); block[14] = len << 3; #if (ARCH_LITTLE_ENDIAN==0) block[14] = JOHNSWAP(block[14]); #endif MD5_Update(&ctx, (unsigned char*)block, 64); // data MD5_Update(&ctx, cur_salt->salt, cur_salt->length); // key (again) MD5_Update(&ctx, saved_key[index], len); MD5_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void hsrp_set_key(char *key, int index) { saved_len[index] = strlen(key); /* strncpy will pad with zeros, which is needed */ strncpy(saved_key[index], key, sizeof(saved_key[0])); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_hsrp = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, hsrp_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif
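crypt_all() above hand-builds the first 64-byte MD5 block (key, 0x80 pad byte, bit length in word 14) and then hashes the packet data followed by the key again. A standalone re-computation of the same construction, sketched against OpenSSL's MD5_* API on a little-endian host (link with -lcrypto; the packet bytes here are a toy stand-in, not one of the test vectors):

/* Recompute the HSRP-style digest the way crypt_all() does:
   MD5( key padded to a full 64-byte MD5 block || packet data || key ). */
#include <openssl/md5.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void hsrp_digest(const char *key, const unsigned char *data,
                        size_t datalen, unsigned char out[16]) {
    uint32_t block[16] = { 0 };
    size_t len = strlen(key);              /* must be < 56 for this scheme */
    memcpy(block, key, len);
    ((unsigned char *)block)[len] = 0x80;  /* MD5 pad byte */
    block[14] = (uint32_t)(len << 3);      /* bit length; little-endian host */
    MD5_CTX ctx;
    MD5_Init(&ctx);
    MD5_Update(&ctx, block, 64);           /* key + keyfill, one full block */
    MD5_Update(&ctx, data, datalen);       /* packet data */
    MD5_Update(&ctx, key, len);            /* key again */
    MD5_Final(out, &ctx);
}

int main(void) {
    const unsigned char data[] = { 0x00, 0x00, 0x04, 0x03 }; /* toy packet */
    unsigned char d[16];
    hsrp_digest("password12345", data, sizeof data, d);
    for (int i = 0; i < 16; i++) printf("%02x", d[i]);
    printf("\n");
    return 0;
}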
generator_test_omp.c
/* Copyright (C) 2009-2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include <math.h> #include <stdlib.h> #include <stdint.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include <stdio.h> #include <omp.h> #include "make_graph.h" int main(int argc, char* argv[]) { int log_numverts; double start, time_taken; int64_t i; int64_t nedges, actual_nedges; int64_t* result; log_numverts = 16; /* In base GRAPHGEN_INITIATOR_SIZE */ if (argc >= 2) log_numverts = atoi(argv[1]); /* Start of graph generation timing */ start = omp_get_wtime(); double initiator[] = {.57, .19, .19, .05}; make_graph(log_numverts, 8. * pow(2., log_numverts), 1, 2, initiator, &nedges, &result); time_taken = omp_get_wtime() - start; /* End of graph generation timing */ actual_nedges = 0; #pragma omp parallel for reduction(+: actual_nedges) for (i = 0; i < nedges; ++i) if (result[i * 2] != (int64_t)(-1)) ++actual_nedges; fprintf(stderr, "%" PRIu64 " edge%s generated and permuted in %fs (%f Medges/s)\n", actual_nedges, (actual_nedges == 1 ? "" : "s"), time_taken, 1. * actual_nedges / time_taken * 1.e-6); free(result); return 0; }
test_schedule.c
#include <omp.h> #include <stdio.h> #include <string.h> int main() { #pragma omp parallel { omp_sched_t schedule; int chunk_size; omp_get_schedule(&schedule, &chunk_size); if (omp_get_thread_num() == 0) { printf("Number of threads = %d / %d\n", omp_get_num_threads(), omp_get_num_procs()); char scheduleStr[15]; switch (schedule) { case omp_sched_static: strcpy(scheduleStr, "static"); break; case omp_sched_dynamic: strcpy(scheduleStr, "dynamic"); break; case omp_sched_guided: strcpy(scheduleStr, "guided"); break; case omp_sched_auto: strcpy(scheduleStr, "auto"); break; default: strcpy(scheduleStr, "monotonic"); break; } printf("Default schedule: %s, %d\n", scheduleStr, chunk_size); } } printf("----------------------------------\n"); return 0; }
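The schedule reported above can also be set programmatically; a minimal companion that sets dynamic with chunk 4 and reads it back through the same omp_get_schedule call the test exercises:

/* Set the runtime schedule, read it back, and use it via schedule(runtime). */
#include <omp.h>
#include <stdio.h>

int main(void) {
    omp_set_schedule(omp_sched_dynamic, 4);  /* schedule(runtime) => dynamic,4 */
    omp_sched_t kind;
    int chunk;
    omp_get_schedule(&kind, &chunk);
    printf("kind=%d chunk=%d\n", (int)kind, chunk);
    #pragma omp parallel for schedule(runtime)
    for (int i = 0; i < 8; i++)
        printf("iter %d on thread %d\n", i, omp_get_thread_num());
    return 0;
}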
find_most_influential.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_FIND_MOST_INFLUENTIAL_H #define RIPPLES_FIND_MOST_INFLUENTIAL_H #include <algorithm> #include <queue> #include <unordered_set> #include <vector> #include <omp.h> #include "ripples/counting.h" #include "ripples/imm_execution_record.h" #include "ripples/partition.h" #include "ripples/streaming_find_most_influential.h" #include "ripples/utility.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" #ifdef RIPPLES_ENABLE_CUDA #include "ripples/cuda/cuda_generate_rrr_sets.h" #include "ripples/cuda/find_most_influential.h" #include "thrust/count.h" #include "thrust/device_ptr.h" #endif namespace ripples { //! \brief Select k seeds starting from a list of Random Reverse //! Reachability Sets. //! //! \tparam GraphTy The graph type. //! \tparam ConfTy The configuration type. //! \tparam RRRset The type storing Random Reverse Reachability Sets. //! //! \param G The input graph. //! \param CFG The configuration (carries the seed-set size k). //! \param RRRsets A vector of Random Reverse Reachability sets. //! \param record The execution record collecting timings. //! \param enableGPU Whether GPU workers may be used. //! \param ex_tag The execution policy tag. //! //! \return a pair of the fraction of RRRsets covered (a double) and //! the set of vertices selected as seeds.
template <typename GraphTy, typename ConfTy, typename RRRset> auto FindMostInfluentialSet(const GraphTy &G, const ConfTy &CFG, std::vector<RRRset> &RRRsets, IMMExecutionRecord &record, bool enableGPU, sequential_tag &&ex_tag) { using vertex_type = typename GraphTy::vertex_type; size_t k = CFG.k; std::vector<uint32_t> vertexCoverage(G.num_nodes(), 0); auto cmp = [](std::pair<vertex_type, size_t> &a, std::pair<vertex_type, size_t> &b) { return a.second < b.second; }; using priorityQueue = std::priority_queue<std::pair<vertex_type, size_t>, std::vector<std::pair<vertex_type, size_t>>, decltype(cmp)>; std::vector<std::pair<vertex_type, size_t>> queue_storage(G.num_nodes()); auto counting = measure<>::exec_time([&]() { CountOccurrencies(RRRsets.begin(), RRRsets.end(), vertexCoverage.begin(), vertexCoverage.end(), std::forward<sequential_tag>(ex_tag)); }); InitHeapStorage(vertexCoverage.begin(), vertexCoverage.end(), queue_storage.begin(), queue_storage.end(), std::forward<sequential_tag>(ex_tag)); priorityQueue queue(cmp, std::move(queue_storage)); std::vector<typename GraphTy::vertex_type> result; result.reserve(k); size_t uncovered = RRRsets.size(); auto end = RRRsets.end(); typename IMMExecutionRecord::ex_time_ms pivoting{}; // value-initialized: accumulated with += below while (result.size() < k && uncovered != 0) { auto element = queue.top(); queue.pop(); if (element.second > vertexCoverage[element.first]) { element.second = vertexCoverage[element.first]; queue.push(element); continue; } uncovered -= element.second; auto cmp = [=](const RRRset &a) -> auto { return !std::binary_search(a.begin(), a.end(), element.first); }; auto start = std::chrono::high_resolution_clock::now(); auto itr = partition(RRRsets.begin(), end, cmp, std::forward<sequential_tag>(ex_tag)); pivoting += (std::chrono::high_resolution_clock::now() - start); counting += measure<>::exec_time([&]() { if (std::distance(itr, end) < std::distance(RRRsets.begin(), itr)) { UpdateCounters(itr, end, vertexCoverage, std::forward<sequential_tag>(ex_tag)); } else { std::fill(vertexCoverage.begin(), vertexCoverage.end(), 0); CountOccurrencies(RRRsets.begin(), itr, vertexCoverage.begin(), vertexCoverage.end(), std::forward<sequential_tag>(ex_tag)); } }); end = itr; result.push_back(element.first); } double f = double(RRRsets.size() - uncovered) / RRRsets.size(); record.Counting.push_back( std::chrono::duration_cast<typename IMMExecutionRecord::ex_time_ms>( counting)); record.Pivoting.push_back(pivoting); return std::make_pair(f, result); } template <typename GraphTy, typename ConfTy, typename RRRset> auto FindMostInfluentialSet(const GraphTy &G, const ConfTy &CFG, std::vector<RRRset> &RRRsets, IMMExecutionRecord &record, bool enableGPU, omp_parallel_tag &&ex_tag) { size_t num_gpu = 0; size_t num_max_cpu = 0; #pragma omp single { num_max_cpu = std::min<size_t>(omp_get_max_threads(), CFG.seed_select_max_workers); } #ifdef RIPPLES_ENABLE_CUDA if (enableGPU) { num_gpu = std::min(cuda_num_devices(), CFG.seed_select_max_gpu_workers); } #endif StreamingFindMostInfluential<GraphTy> SE(G, RRRsets, num_max_cpu, num_gpu); return SE.find_most_influential_set(CFG.k); } #ifdef RIPPLES_ENABLE_CUDA template <typename Itr> void MoveRRRSets(Itr in_begin, Itr in_end, uint32_t *d_rrr_index, uint32_t *d_rrr_sets, size_t rrr_index_size, size_t rrr_sets_size) { std::vector<uint32_t> buffer(rrr_sets_size); std::vector<uint32_t> buffer2(rrr_sets_size); auto position = buffer.begin(); auto position2 = buffer2.begin(); uint32_t id = 0; for (auto itr = in_begin; itr < in_end; ++itr, ++id) { position =
std::copy(itr->begin(), itr->end(), position); std::fill(position2, position2 + itr->size(), id); std::advance(position2, itr->size()); } cuda_h2d(reinterpret_cast<void *>(d_rrr_index), reinterpret_cast<void *>(buffer2.data()), sizeof(uint32_t) * rrr_sets_size); cuda_h2d(reinterpret_cast<void *>(d_rrr_sets), reinterpret_cast<void *>(buffer.data()), sizeof(uint32_t) * rrr_sets_size); } template <typename GraphTy, typename RRRset> auto FindMostInfluentialSet(const GraphTy &G, size_t k, std::vector<RRRset> &RRRsets, IMMExecutionRecord &record, cuda_parallel_tag &&ex_tag) { using vertex_type = typename GraphTy::vertex_type; size_t rrr_sets_size = 0; #pragma omp parallel for reduction(+ : rrr_sets_size) for (auto itr = RRRsets.begin(); itr < RRRsets.end(); ++itr) { rrr_sets_size += itr->size(); } size_t rrr_index_size = rrr_sets_size; uint32_t *d_Counters; cuda_malloc(reinterpret_cast<void **>(&d_Counters), sizeof(uint32_t) * G.num_nodes()); cuda_memset(reinterpret_cast<void **>(d_Counters), 0, sizeof(uint32_t) * G.num_nodes()); uint32_t *d_rrr_index; uint32_t *d_rrr_sets; cuda_malloc(reinterpret_cast<void **>(&d_rrr_index), sizeof(uint32_t) * rrr_index_size); cuda_malloc(reinterpret_cast<void **>(&d_rrr_sets), sizeof(uint32_t) * rrr_sets_size); char *d_rr_mask; cuda_malloc(reinterpret_cast<void **>(&d_rr_mask), sizeof(char) * RRRsets.size()); cuda_memset(reinterpret_cast<void *>(d_rr_mask), 0, sizeof(char) * RRRsets.size()); auto counting = measure<>::exec_time([&]() { MoveRRRSets(RRRsets.begin(), RRRsets.end(), d_rrr_index, d_rrr_sets, rrr_index_size, rrr_sets_size); }); counting += measure<>::exec_time([&]() { CudaCountOccurrencies(d_Counters, d_rrr_sets, rrr_sets_size, G.num_nodes()); }); std::vector<vertex_type> result; size_t uncovered = RRRsets.size(); while (uncovered != 0) { // Find Max auto v = CudaMaxElement(d_Counters, G.num_nodes()); result.push_back(v.first); uncovered -= v.second; std::cout << "Reference Selected : " << v.first << " " << v.second << std::endl; if (result.size() == k) break; // Update Counters counting += measure<>::exec_time([&]() { CudaUpdateCounters(rrr_sets_size, d_rrr_index, d_rrr_sets, d_rr_mask, d_Counters, G.num_nodes(), v.first); }); } cuda_free(d_Counters); cuda_free(d_rrr_index); cuda_free(d_rrr_sets); cuda_free(d_rr_mask); double f = double(RRRsets.size() - uncovered) / RRRsets.size(); record.Counting.push_back( std::chrono::duration_cast<typename IMMExecutionRecord::ex_time_ms>( counting)); return std::make_pair(f, result); } #endif } // namespace ripples #endif // RIPPLES_FIND_MOST_INFLUENTIAL_H
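Underneath the heaps, partitioning, and GPU offload, FindMostInfluentialSet is greedy maximum coverage over the RRR sets: repeatedly pick the vertex hitting the most still-uncovered sets. A toy plain-C sketch of that core loop (the lazy priority queue and UpdateCounters above are optimizations of exactly this):

/* Greedy max coverage: pick k vertices covering the most RRR sets.
   Toy instance; each set is an array of vertex ids, -1 terminated. */
#include <stdio.h>

#define NSETS 5
#define NVERT 6

int main(void) {
    int sets[NSETS][4] = { {0,1,-1,-1}, {1,2,-1,-1}, {2,3,-1,-1},
                           {3,4,-1,-1}, {1,5,-1,-1} };
    int covered[NSETS] = { 0 };
    int k = 2;
    for (int round = 0; round < k; round++) {
        int count[NVERT] = { 0 };
        for (int s = 0; s < NSETS; s++) {       /* count uncovered hits */
            if (covered[s]) continue;
            for (int t = 0; sets[s][t] >= 0; t++) count[sets[s][t]]++;
        }
        int best = 0;
        for (int v = 1; v < NVERT; v++)
            if (count[v] > count[best]) best = v;
        for (int s = 0; s < NSETS; s++)         /* mark sets hit by 'best' */
            for (int t = 0; !covered[s] && sets[s][t] >= 0; t++)
                if (sets[s][t] == best) covered[s] = 1;
        printf("seed %d covers %d sets\n", best, count[best]);
    }
    return 0;
}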
Surface_tools.c
/* Generated by Cython 0.27.3 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_27_3" #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 
0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current 
PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif 
#define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__Surface_tools #define __PYX_HAVE_API__Surface_tools #include <string.h> #include <stdio.h> #include "pythread.h" #include <stdlib.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) ||
            memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    /* +1 for the NUL terminator: strcpy() below writes strlen()+1 bytes */
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x)   (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
  "Surface_tools.pyx",
  "stringsource",
  "type.pxd",
  "bool.pxd",
  "complex.pxd",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* __GNUC_PATCHLEVEL__ spelled with trailing underscores (the upstream template
   drops them, making the patchlevel test silently evaluate against 0) */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct
__pyx_opt_args_13Surface_tools_reshape; /* "Surface_tools.pyx":137 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ struct __pyx_opt_args_13Surface_tools_reshape { int __pyx_n; PyObject *factor_; }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":277 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* 
(*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int 
__Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, 
tb)
#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* MemviewSliceInit.proto */
/* 8 matches the fixed shape/strides/suboffsets arrays declared above; the
   upstream template parameter %(BUF_MAX_NDIMS)d was left unsubstituted here */
#define __Pyx_BUF_MAX_NDIMS 8
#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR    2
#define __Pyx_MEMVIEW_FULL   4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\
    PyObject_RichCompare(op1, op2, Py_EQ)
#endif
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
    const char* function_name);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* SetItemInt.proto */ #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ 
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_unsigned_char(const char *itemp); static 
CYTHON_INLINE int __pyx_memview_set_unsigned_char(const char *itemp, PyObject *obj); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned char __Pyx_PyInt_As_unsigned_char(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject 
*__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'cpython.version' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'Surface_tools' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; 
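/*
 * Illustrative note (not generated by Cython): the memoryview machinery
 * declared above addresses elements through the __Pyx_memviewslice struct
 * defined earlier (a data pointer plus per-dimension shape/strides/suboffsets).
 * Below is a minimal sketch of the addressing rule, following the PEP 3118
 * buffer convention that a non-negative suboffset means "dereference, then
 * offset"; the helper name is hypothetical and the block is compiled out
 * with #if 0.
 */
#if 0
static char *__pyx_example_element_ptr(const __Pyx_memviewslice *s,
                                       const Py_ssize_t *index, int ndim)
{
    char *p = s->data;
    int d;
    for (d = 0; d < ndim; d++) {
        p += index[d] * s->strides[d];           /* strided step in dimension d */
        if (s->suboffsets[d] >= 0)
            p = *(char **) p + s->suboffsets[d]; /* indirect (pointer) dimension */
    }
    return p;
}
#endif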
static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static PyObject *__pyx_f_13Surface_tools_make_transparent(PyObject *, int, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_13Surface_tools_reshape(PyObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_13Surface_tools_reshape *__pyx_optional_args); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void 
__pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_unsigned_char = { "unsigned char", NULL, sizeof(unsigned char), { 0 }, 0, IS_UNSIGNED(unsigned char) ? 'U' : 'I', IS_UNSIGNED(unsigned char), 0 }; #define __Pyx_MODULE_NAME "Surface_tools" extern int __pyx_module_is_main_Surface_tools; int __pyx_module_is_main_Surface_tools = 0; /* Implementation of 'Surface_tools' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_RGBA[] = "RGBA"; static const char __pyx_k_Rect[] = "Rect"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_copy[] = "copy"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mask[] = "mask"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_alpha[] = "alpha_"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_empty[] = "empty"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_image[] = "image"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_scale[] = "scale"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_uint8[] = "uint8"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_factor[] = "factor_"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_pygame[] = "pygame"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_rotate[] = "rotate"; static const char __pyx_k_sprite[] = "sprite_"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_HWACCEL[] = "HWACCEL"; static const char __pyx_k_Surface[] = "Surface"; static const char __pyx_k_Vector2[] = "Vector2"; static const char __pyx_k_array3d[] = "array3d"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_image_2[] = "image_"; static const char __pyx_k_memview[] = "memview"; 
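/*
 * Illustrative note (not generated by Cython): each __pyx_k_* C literal below
 * is paired with a PyObject* slot (declared further down) and registered in a
 * __Pyx_StringTabEntry table that __Pyx_InitStrings() (both declared earlier
 * in this file) walks once at module init, creating and interning the
 * Python-level string objects. A minimal sketch of such a table follows; the
 * exact field values are an assumption based on the __Pyx_StringTabEntry
 * layout declared earlier, and the block is compiled out with #if 0.
 */
#if 0
static __Pyx_StringTabEntry __pyx_example_string_tab[] = {
    /* {slot, literal, sizeof(literal), encoding, is_unicode, is_str, intern} */
    {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
    {0, 0, 0, 0, 0, 0, 0}  /* sentinel terminates the table */
};
/* called once from module init: if (__Pyx_InitStrings(__pyx_example_string_tab) < 0) goto bad; */
#endif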
static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_RLEACCEL[] = "RLEACCEL"; static const char __pyx_k_SRCALPHA[] = "SRCALPHA"; static const char __pyx_k_get_size[] = "get_size"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pixels3d[] = "pixels3d"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_get_width[] = "get_width"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_frombuffer[] = "frombuffer"; static const char __pyx_k_get_height[] = "get_height"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_array_alpha[] = "array_alpha"; static const char __pyx_k_get_bitsize[] = "get_bitsize"; static const char __pyx_k_pygame_math[] = "pygame.math"; static const char __pyx_k_smoothscale[] = "smoothscale"; static const char __pyx_k_pixels_alpha[] = "pixels_alpha"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_BLEND_RGB_ADD[] = "BLEND_RGB_ADD"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_Invalid_surface[] = "\nInvalid surface."; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pygame_surfarray[] = "pygame.surfarray"; static const char __pyx_k_pygame_transform[] = "pygame.transform"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_Array_shape_not_understood[] = "\nArray shape not understood."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_Argument_factor__must_be_float[] = "\nArgument factor_ must be float or int got 
%s "; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Argument_factor__incorrect_type[] = "\nArgument factor_ incorrect type must be float, int or tuple got %s "; static const char __pyx_k_Argument_factor__must_be_a_list[] = "\nArgument factor_ must be a list or tuple got %s "; static const char __pyx_k_Image_with_incorrect_shape_must[] = "Image with incorrect shape, must be (w>0, h>0) got (w:%s, h:%s) "; static const char __pyx_k_Pygame_library_is_missing_on_yo[] = "\n<Pygame> library is missing on your system.\nTry: \n C:\\pip install pygame on a window command prompt."; static const char __pyx_k_Surface_without_per_pixel_infor[] = "\nSurface without per-pixel information."; static const char __pyx_k_invalid_value_for_argument_alph[] = "\n[-] invalid value for argument alpha_, range [0..255] got %s "; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Expecting_Surface_for_positional[] = "Expecting Surface for positional argument image_ got %s "; static const char __pyx_k_Expecting_int_for_positional_arg[] = "Expecting int for positional argument alpha_ got %s "; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Argument_factor__incorrect_type; static PyObject *__pyx_kp_s_Argument_factor__must_be_a_list; static PyObject *__pyx_kp_s_Argument_factor__must_be_float; static PyObject *__pyx_kp_s_Array_shape_not_understood; static PyObject *__pyx_n_s_BLEND_RGB_ADD; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Expecting_Surface_for_positional; static PyObject *__pyx_kp_s_Expecting_int_for_positional_arg; static PyObject *__pyx_n_s_HWACCEL; static PyObject *__pyx_kp_s_Image_with_incorrect_shape_must; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_kp_s_Invalid_surface; static PyObject *__pyx_n_s_MemoryError; 
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_kp_s_Pygame_library_is_missing_on_yo; static PyObject *__pyx_n_s_RGBA; static PyObject *__pyx_n_s_RLEACCEL; static PyObject *__pyx_n_s_Rect; static PyObject *__pyx_n_s_SRCALPHA; static PyObject *__pyx_n_s_Surface; static PyObject *__pyx_kp_s_Surface_without_per_pixel_infor; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_Vector2; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha; static PyObject *__pyx_n_s_array3d; static PyObject *__pyx_n_s_array_alpha; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_copy; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_empty; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_factor; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_frombuffer; static PyObject *__pyx_n_s_get_bitsize; static PyObject *__pyx_n_s_get_height; static PyObject *__pyx_n_s_get_size; static PyObject *__pyx_n_s_get_width; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_image; static PyObject *__pyx_n_s_image_2; static PyObject *__pyx_n_s_import; static PyObject *__pyx_kp_s_invalid_value_for_argument_alph; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mask; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pixels3d; static PyObject *__pyx_n_s_pixels_alpha; static PyObject *__pyx_n_s_pygame; static PyObject *__pyx_n_s_pygame_math; static PyObject *__pyx_n_s_pygame_surfarray; static PyObject *__pyx_n_s_pygame_transform; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_rotate; static PyObject *__pyx_n_s_scale; static PyObject *__pyx_n_s_setstate; 
static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_smoothscale; static PyObject *__pyx_n_s_sprite; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_uint8; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_pf_13Surface_tools_make_transparent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_image_, int __pyx_v_alpha_); /* proto */ static PyObject *__pyx_pf_13Surface_tools_2reshape(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sprite_, PyObject *__pyx_v_factor_); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct 
__pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* 
proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_float_1_0; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_4; static PyObject *__pyx_int_32; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__19; static PyObject *__pyx_slice__20; static PyObject *__pyx_slice__21; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_codeobj__32; /* "Surface_tools.pyx":42 * @cython.nonecheck(False) * @cython.cdivision(True) * cdef make_array_c_code(unsigned char[:, :, :] rgb_array_c, unsigned char[:, :] alpha_c): # <<<<<<<<<<<<<< * """ * STACK ARRAY RGB VALUES WITH ALPHA CHANNEL. 
*/ static PyObject *__pyx_f_13Surface_tools_make_array_c_code(__Pyx_memviewslice __pyx_v_rgb_array_c, __Pyx_memviewslice __pyx_v_alpha_c) { int __pyx_v_width; int __pyx_v_height; CYTHON_UNUSED PyObject *__pyx_v_e = NULL; __Pyx_memviewslice __pyx_v_new_array = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; int __pyx_v_j; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *(*__pyx_t_8)(PyObject *); int __pyx_t_9; int __pyx_t_10; __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; unsigned char __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; unsigned char __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; unsigned char __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; unsigned char __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; PyObject *__pyx_t_42 = NULL; __Pyx_RefNannySetupContext("make_array_c_code", 0); /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "Surface_tools.pyx":53 * cdef int width, height * try: * width, height = (<object> rgb_array_c).shape[:2] # <<<<<<<<<<<<<< * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') */ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_rgb_array_c, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_t_5, 0, 2, NULL, NULL, &__pyx_slice_, 0, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { PyObject* sequence = __pyx_t_4; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 53, __pyx_L3_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_5 = PyList_GET_ITEM(sequence, 0); __pyx_t_6 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else __pyx_t_5 = PySequence_ITEM(sequence, 0); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); #endif __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { Py_ssize_t index = -1; __pyx_t_7 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; index = 0; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L9_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L9_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) __PYX_ERR(0, 53, __pyx_L3_error) __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L10_unpacking_done; __pyx_L9_unpacking_failed:; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 53, __pyx_L3_error) __pyx_L10_unpacking_done:; } __pyx_t_9 = __Pyx_PyInt_As_int(__pyx_t_5); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_6); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_width = __pyx_t_9; __pyx_v_height = __pyx_t_10; /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":54 * try: * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: # <<<<<<<<<<<<<< * raise ValueError('\nArray shape not understood.') * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError) || __Pyx_PyErr_ExceptionMatches(__pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_10) { __Pyx_AddTraceback("Surface_tools.make_array_c_code", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_4, &__pyx_t_5) < 0) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __pyx_v_e = __pyx_t_4; /* "Surface_tools.pyx":55 * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 55, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; 
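/*
 * __PYX_ERR(f_index, lineno, label) records the originating .pyx
 * position (file-table index 0, line 55 here) for the traceback being
 * built, then jumps to the given label.  Because the ValueError above
 * is raised from inside an except clause, control reaches
 * __pyx_L5_except_error below, where the exception state captured by
 * __Pyx_ExceptionSave() at the top of the try block is reset before
 * the function bails out through __pyx_L1_error.
 */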
__PYX_ERR(0, 55, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "Surface_tools.pyx":58 * * cdef: * unsigned char[:, :, ::1] new_array = empty((width, height, 4), dtype=uint8) # <<<<<<<<<<<<<< * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_width); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_height); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_int_4); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_uint8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(__pyx_t_4); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_new_array = __pyx_t_11; __pyx_t_11.memview = NULL; __pyx_t_11.data = NULL; /* "Surface_tools.pyx":59 * cdef: * unsigned char[:, :, ::1] new_array = empty((width, height, 4), dtype=uint8) * int i=0, j=0 # <<<<<<<<<<<<<< * # EQUIVALENT TO A NUMPY DSTACK * with nogil: */ __pyx_v_i = 0; __pyx_v_j = 0; /* "Surface_tools.pyx":61 * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: # <<<<<<<<<<<<<< * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "Surface_tools.pyx":62 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): # <<<<<<<<<<<<<< * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ */ __pyx_t_10 = __pyx_v_width; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef 
unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(8) private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) schedule(static) #endif /* _OPENMP */ for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_12; __pyx_t_9++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_9); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "Surface_tools.pyx":63 * with nogil: * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): # <<<<<<<<<<<<<< * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ */ __pyx_t_13 = __pyx_v_height; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_j = __pyx_t_14; /* "Surface_tools.pyx":65 * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ # <<<<<<<<<<<<<< * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) */ __pyx_t_15 = __pyx_v_i; __pyx_t_16 = __pyx_v_j; __pyx_t_17 = 0; __pyx_t_18 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_15 * __pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_16 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_17 * __pyx_v_rgb_array_c.strides[2]) ))); __pyx_t_19 = __pyx_v_i; __pyx_t_20 = __pyx_v_j; __pyx_t_21 = 1; __pyx_t_22 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_19 * __pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_20 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_21 * __pyx_v_rgb_array_c.strides[2]) ))); /* "Surface_tools.pyx":66 * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] # <<<<<<<<<<<<<< * return asarray(new_array) * */ __pyx_t_23 = __pyx_v_i; __pyx_t_24 = __pyx_v_j; __pyx_t_25 = 2; __pyx_t_26 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_23 * __pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_24 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_25 * __pyx_v_rgb_array_c.strides[2]) ))); __pyx_t_27 = __pyx_v_i; __pyx_t_28 = __pyx_v_j; __pyx_t_29 = (*((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_alpha_c.data + __pyx_t_27 * __pyx_v_alpha_c.strides[0]) ) + __pyx_t_28 * __pyx_v_alpha_c.strides[1]) ))); /* "Surface_tools.pyx":64 * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ # <<<<<<<<<<<<<< * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] */ __pyx_t_30 = __pyx_v_i; __pyx_t_31 = __pyx_v_j; __pyx_t_32 = 0; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_30 * __pyx_v_new_array.strides[0]) ) + __pyx_t_31 * 
__pyx_v_new_array.strides[1]) )) + __pyx_t_32)) )) = __pyx_t_18; __pyx_t_33 = __pyx_v_i; __pyx_t_34 = __pyx_v_j; __pyx_t_35 = 1; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_33 * __pyx_v_new_array.strides[0]) ) + __pyx_t_34 * __pyx_v_new_array.strides[1]) )) + __pyx_t_35)) )) = __pyx_t_22; __pyx_t_36 = __pyx_v_i; __pyx_t_37 = __pyx_v_j; __pyx_t_38 = 2; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_36 * __pyx_v_new_array.strides[0]) ) + __pyx_t_37 * __pyx_v_new_array.strides[1]) )) + __pyx_t_38)) )) = __pyx_t_26; /* "Surface_tools.pyx":65 * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ # <<<<<<<<<<<<<< * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) */ __pyx_t_39 = __pyx_v_i; __pyx_t_40 = __pyx_v_j; __pyx_t_41 = 3; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_39 * __pyx_v_new_array.strides[0]) ) + __pyx_t_40 * __pyx_v_new_array.strides[1]) )) + __pyx_t_41)) )) = __pyx_t_29; } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "Surface_tools.pyx":61 * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: # <<<<<<<<<<<<<< * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L15; } __pyx_L15:; } } /* "Surface_tools.pyx":67 * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_asarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_new_array, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_5) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 
1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_42 = PyTuple_New(1+1); if (unlikely(!__pyx_t_42)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_42); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_42, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_42, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_42, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_42); __pyx_t_42 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "Surface_tools.pyx":42 * @cython.nonecheck(False) * @cython.cdivision(True) * cdef make_array_c_code(unsigned char[:, :, :] rgb_array_c, unsigned char[:, :] alpha_c): # <<<<<<<<<<<<<< * """ * STACK ARRAY RGB VALUES WITH ALPHA CHANNEL. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __Pyx_XDECREF(__pyx_t_42); __Pyx_AddTraceback("Surface_tools.make_array_c_code", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_e); __PYX_XDEC_MEMVIEW(&__pyx_v_new_array, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "Surface_tools.pyx":75 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef make_transparent(image_, int alpha_): # <<<<<<<<<<<<<< * """ * MODIFY TRANSPARENCY TO A PYGAME SURFACE */ static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_13Surface_tools_make_transparent(PyObject *__pyx_v_image_, int __pyx_v_alpha_, CYTHON_UNUSED int __pyx_skip_dispatch) { PyObject *__pyx_v_rgb = NULL; PyObject *__pyx_v_alpha = NULL; int __pyx_v_w; int __pyx_v_h; __Pyx_memviewslice __pyx_v_new_array = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha_array = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_rgb_array = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; int __pyx_v_j; int __pyx_v_a; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; PyObject *(*__pyx_t_11)(PyObject *); int __pyx_t_12; PyObject *__pyx_t_13 = NULL; __Pyx_memviewslice __pyx_t_14 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_15 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_16 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; PyObject *__pyx_t_43 = NULL; __Pyx_RefNannySetupContext("make_transparent", 0); /* "Surface_tools.pyx":84 * 
""" * * assert isinstance(image_, Surface), \ # <<<<<<<<<<<<<< * 'Expecting Surface for positional argument image_ got %s ' % type(image_) * assert isinstance(alpha_, int), \ */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_Surface); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 84, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_IsInstance(__pyx_v_image_, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 84, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!(__pyx_t_2 != 0))) { /* "Surface_tools.pyx":85 * * assert isinstance(image_, Surface), \ * 'Expecting Surface for positional argument image_ got %s ' % type(image_) # <<<<<<<<<<<<<< * assert isinstance(alpha_, int), \ * 'Expecting int for positional argument alpha_ got %s ' % type(alpha_) */ __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Expecting_Surface_for_positional, ((PyObject *)Py_TYPE(__pyx_v_image_))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); PyErr_SetObject(PyExc_AssertionError, __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 84, __pyx_L1_error) } } #endif /* "Surface_tools.pyx":86 * assert isinstance(image_, Surface), \ * 'Expecting Surface for positional argument image_ got %s ' % type(image_) * assert isinstance(alpha_, int), \ # <<<<<<<<<<<<<< * 'Expecting int for positional argument alpha_ got %s ' % type(alpha_) * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_alpha_); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_Check(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!(__pyx_t_2 != 0))) { /* "Surface_tools.pyx":87 * 'Expecting Surface for positional argument image_ got %s ' % type(image_) * assert isinstance(alpha_, int), \ * 'Expecting int for positional argument alpha_ got %s ' % type(alpha_) # <<<<<<<<<<<<<< * * if not (0 <= alpha_ <= 255): */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_alpha_); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Expecting_int_for_positional_arg, ((PyObject *)Py_TYPE(__pyx_t_1))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyErr_SetObject(PyExc_AssertionError, __pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 86, __pyx_L1_error) } } #endif /* "Surface_tools.pyx":89 * 'Expecting int for positional argument alpha_ got %s ' % type(alpha_) * * if not (0 <= alpha_ <= 255): # <<<<<<<<<<<<<< * raise ValueError('\n[-] invalid value for argument alpha_, range [0..255] got %s ' % alpha_) * */ __pyx_t_2 = (0 <= __pyx_v_alpha_); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_alpha_ <= 0xFF); } __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "Surface_tools.pyx":90 * * if not (0 <= alpha_ <= 255): * raise ValueError('\n[-] invalid value for argument alpha_, range [0..255] got %s ' % alpha_) # <<<<<<<<<<<<<< * * if not image_.get_bitsize() == 32: */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_alpha_); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_invalid_value_for_argument_alph, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 90, __pyx_L1_error) /* "Surface_tools.pyx":89 * 'Expecting int for positional argument alpha_ got %s ' % type(alpha_) * * if not (0 <= alpha_ <= 255): # <<<<<<<<<<<<<< * raise ValueError('\n[-] invalid value for argument alpha_, range [0..255] got %s ' % alpha_) * */ } /* "Surface_tools.pyx":92 * raise ValueError('\n[-] invalid value for argument alpha_, range [0..255] got %s ' % alpha_) * * if not image_.get_bitsize() == 32: # <<<<<<<<<<<<<< * raise TypeError("\nSurface without per-pixel information.") * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_image_, __pyx_n_s_get_bitsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (__pyx_t_5) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_1, __pyx_int_32, 32, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = ((!__pyx_t_4) != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":93 * * if not image_.get_bitsize() == 32: * raise TypeError("\nSurface without per-pixel information.") # <<<<<<<<<<<<<< * * try: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 93, __pyx_L1_error) /* "Surface_tools.pyx":92 * raise ValueError('\n[-] invalid value for argument alpha_, range [0..255] got %s ' % alpha_) * * if not image_.get_bitsize() == 32: # <<<<<<<<<<<<<< * raise TypeError("\nSurface without per-pixel information.") * */ } /* "Surface_tools.pyx":95 * raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); /*try:*/ { /* "Surface_tools.pyx":96 * * try: * rgb = pixels3d(image_) # <<<<<<<<<<<<<< * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_pixels3d); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (!__pyx_t_5) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_image_); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_image_}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_image_}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_v_image_); __Pyx_GIVEREF(__pyx_v_image_); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_v_image_); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_rgb = __pyx_t_3; __pyx_t_3 = 0; /* "Surface_tools.pyx":95 * raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ } __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L10_try_end; __pyx_L5_error:; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; /* "Surface_tools.pyx":97 * try: * rgb = pixels3d(image_) * except (pygame.error, ValueError): # <<<<<<<<<<<<<< * raise ValueError('\nInvalid surface.') * */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_10) { __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_3, &__pyx_t_9) < 0) __PYX_ERR(0, 97, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_9); /* "Surface_tools.pyx":98 * rgb = pixels3d(image_) * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') # <<<<<<<<<<<<<< * * try: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); 
__pyx_t_5 = 0; __PYX_ERR(0, 98, __pyx_L7_except_error) } goto __pyx_L7_except_error; __pyx_L7_except_error:; /* "Surface_tools.pyx":95 * raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8); goto __pyx_L1_error; __pyx_L10_try_end:; } /* "Surface_tools.pyx":100 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_8, &__pyx_t_7, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { /* "Surface_tools.pyx":101 * * try: * alpha = pixels_alpha(image_) # <<<<<<<<<<<<<< * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_pixels_alpha); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_1) { __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_image_); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_9); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_image_}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_image_}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_v_image_); __Pyx_GIVEREF(__pyx_v_image_); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_v_image_); __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 101, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_alpha = __pyx_t_9; __pyx_t_9 = 0; /* "Surface_tools.pyx":100 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ } __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L18_try_end; __pyx_L13_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":102 * try: * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): # <<<<<<<<<<<<<< * raise ValueError('\nSurface without 
per-pixel information.') * */ __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 102, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_error); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_t_3) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_10) { __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_9, &__pyx_t_5) < 0) __PYX_ERR(0, 102, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); /* "Surface_tools.pyx":103 * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') # <<<<<<<<<<<<<< * * cdef int w, h */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 103, __pyx_L15_except_error) } goto __pyx_L15_except_error; __pyx_L15_except_error:; /* "Surface_tools.pyx":100 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_8, __pyx_t_7, __pyx_t_6); goto __pyx_L1_error; __pyx_L18_try_end:; } /* "Surface_tools.pyx":106 * * cdef int w, h * w, h = image_.get_size() # <<<<<<<<<<<<<< * * if w==0 or h==0: */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_image_, __pyx_n_s_get_size); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 106, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_9 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_9 = PyList_GET_ITEM(sequence, 0); __pyx_t_3 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_9 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = 
PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_11 = Py_TYPE(__pyx_t_1)->tp_iternext; index = 0; __pyx_t_9 = __pyx_t_11(__pyx_t_1); if (unlikely(!__pyx_t_9)) goto __pyx_L21_unpacking_failed; __Pyx_GOTREF(__pyx_t_9); index = 1; __pyx_t_3 = __pyx_t_11(__pyx_t_1); if (unlikely(!__pyx_t_3)) goto __pyx_L21_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_11(__pyx_t_1), 2) < 0) __PYX_ERR(0, 106, __pyx_L1_error) __pyx_t_11 = NULL; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L22_unpacking_done; __pyx_L21_unpacking_failed:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_11 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 106, __pyx_L1_error) __pyx_L22_unpacking_done:; } __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_12 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_12 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 106, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_w = __pyx_t_10; __pyx_v_h = __pyx_t_12; /* "Surface_tools.pyx":108 * w, h = image_.get_size() * * if w==0 or h==0: # <<<<<<<<<<<<<< * raise ValueError( * 'Image with incorrect shape, must be (w>0, h>0) got (w:%s, h:%s) ' % (w, h)) */ __pyx_t_4 = ((__pyx_v_w == 0) != 0); if (!__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L24_bool_binop_done; } __pyx_t_4 = ((__pyx_v_h == 0) != 0); __pyx_t_2 = __pyx_t_4; __pyx_L24_bool_binop_done:; if (__pyx_t_2) { /* "Surface_tools.pyx":110 * if w==0 or h==0: * raise ValueError( * 'Image with incorrect shape, must be (w>0, h>0) got (w:%s, h:%s) ' % (w, h)) # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_w); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_h); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_3); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Image_with_incorrect_shape_must, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":109 * * if w==0 or h==0: * raise ValueError( # <<<<<<<<<<<<<< * 'Image with incorrect shape, must be (w>0, h>0) got (w:%s, h:%s) ' % (w, h)) * */ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 109, __pyx_L1_error) /* "Surface_tools.pyx":108 * w, h = image_.get_size() 
* * if w==0 or h==0: # <<<<<<<<<<<<<< * raise ValueError( * 'Image with incorrect shape, must be (w>0, h>0) got (w:%s, h:%s) ' % (w, h)) */ } /* "Surface_tools.pyx":113 * * cdef: * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) # <<<<<<<<<<<<<< * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_h); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_w); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_int_4); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_uint8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_13) < 0) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_14 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(__pyx_t_13); if (unlikely(!__pyx_t_14.memview)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_v_new_array = __pyx_t_14; __pyx_t_14.memview = NULL; __pyx_t_14.data = NULL; /* "Surface_tools.pyx":114 * cdef: * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) * unsigned char [:, :] alpha_array = alpha # <<<<<<<<<<<<<< * unsigned char [:, :, :] rgb_array = rgb * int i=0, j=0, a */ __pyx_t_15 = __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(__pyx_v_alpha); if (unlikely(!__pyx_t_15.memview)) __PYX_ERR(0, 114, __pyx_L1_error) __pyx_v_alpha_array = __pyx_t_15; __pyx_t_15.memview = NULL; __pyx_t_15.data = NULL; /* "Surface_tools.pyx":115 * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb # <<<<<<<<<<<<<< * int i=0, j=0, a * */ __pyx_t_16 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(__pyx_v_rgb); if (unlikely(!__pyx_t_16.memview)) __PYX_ERR(0, 115, __pyx_L1_error) 
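/*
 * The nogil loop below fills new_array, which was allocated as
 * (h, w, 4) even though pixels3d()/pixels_alpha() return (w, h)-ordered
 * views: pygame.image.frombuffer() consumes row-major RGBA bytes
 * (h rows of w pixels), so the first two indices are swapped on every
 * write.  Ignoring the parallelisation, it is roughly equivalent to:
 *
 *     new_array[:, :, :3] = rgb_array.transpose(1, 0, 2)
 *     new_array[:, :, 3] = numpy.maximum(
 *         alpha_array.astype(numpy.int16) - alpha_, 0).T
 *
 * (int16 mirrors the C `int a` arithmetic, which must be allowed to go
 * negative before being clamped to zero.)
 */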
__pyx_v_rgb_array = __pyx_t_16; __pyx_t_16.memview = NULL; __pyx_t_16.data = NULL; /* "Surface_tools.pyx":116 * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb * int i=0, j=0, a # <<<<<<<<<<<<<< * * with nogil: */ __pyx_v_i = 0; __pyx_v_j = 0; /* "Surface_tools.pyx":118 * int i=0, j=0, a * * with nogil: # <<<<<<<<<<<<<< * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "Surface_tools.pyx":120 * with nogil: * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): # <<<<<<<<<<<<<< * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] */ __pyx_t_12 = __pyx_v_w; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_17 = (__pyx_t_12 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_17 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(8) private(__pyx_t_18, __pyx_t_19, __pyx_t_2, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_a) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) schedule(static) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_17; __pyx_t_10++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_10); /* Initialize private variables to invalid values */ __pyx_v_a = ((int)0xbad0bad0); __pyx_v_j = ((int)0xbad0bad0); /* "Surface_tools.pyx":121 * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(h): # <<<<<<<<<<<<<< * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] */ __pyx_t_18 = __pyx_v_h; for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) { __pyx_v_j = __pyx_t_19; /* "Surface_tools.pyx":122 * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] # <<<<<<<<<<<<<< * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] */ __pyx_t_20 = __pyx_v_i; __pyx_t_21 = __pyx_v_j; __pyx_t_22 = 0; __pyx_t_23 = __pyx_v_j; __pyx_t_24 = __pyx_v_i; __pyx_t_25 = 0; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_23 * __pyx_v_new_array.strides[0]) ) + __pyx_t_24 * __pyx_v_new_array.strides[1]) )) + __pyx_t_25)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_20 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_21 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_22 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":123 * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] # <<<<<<<<<<<<<< * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ */ __pyx_t_26 = __pyx_v_i; __pyx_t_27 = __pyx_v_j; __pyx_t_28 = 1; __pyx_t_29 = __pyx_v_j; __pyx_t_30 = __pyx_v_i; __pyx_t_31 = 1; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_29 * __pyx_v_new_array.strides[0]) ) + 
__pyx_t_30 * __pyx_v_new_array.strides[1]) )) + __pyx_t_31)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_26 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_27 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_28 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":124 * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] # <<<<<<<<<<<<<< * a = alpha_array[i, j] - alpha_ * if a < 0: */ __pyx_t_32 = __pyx_v_i; __pyx_t_33 = __pyx_v_j; __pyx_t_34 = 2; __pyx_t_35 = __pyx_v_j; __pyx_t_36 = __pyx_v_i; __pyx_t_37 = 2; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_35 * __pyx_v_new_array.strides[0]) ) + __pyx_t_36 * __pyx_v_new_array.strides[1]) )) + __pyx_t_37)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_32 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_33 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_34 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":125 * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ # <<<<<<<<<<<<<< * if a < 0: * a = 0 */ __pyx_t_38 = __pyx_v_i; __pyx_t_39 = __pyx_v_j; __pyx_v_a = ((*((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_alpha_array.data + __pyx_t_38 * __pyx_v_alpha_array.strides[0]) ) + __pyx_t_39 * __pyx_v_alpha_array.strides[1]) ))) - __pyx_v_alpha_); /* "Surface_tools.pyx":126 * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ * if a < 0: # <<<<<<<<<<<<<< * a = 0 * new_array[j, i, 3] = a */ __pyx_t_2 = ((__pyx_v_a < 0) != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":127 * a = alpha_array[i, j] - alpha_ * if a < 0: * a = 0 # <<<<<<<<<<<<<< * new_array[j, i, 3] = a * */ __pyx_v_a = 0; /* "Surface_tools.pyx":126 * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ * if a < 0: # <<<<<<<<<<<<<< * a = 0 * new_array[j, i, 3] = a */ } /* "Surface_tools.pyx":128 * if a < 0: * a = 0 * new_array[j, i, 3] = a # <<<<<<<<<<<<<< * * return pygame.image.frombuffer(new_array, (w, h), 'RGBA') */ __pyx_t_40 = __pyx_v_j; __pyx_t_41 = __pyx_v_i; __pyx_t_42 = 3; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_40 * __pyx_v_new_array.strides[0]) ) + __pyx_t_41 * __pyx_v_new_array.strides[1]) )) + __pyx_t_42)) )) = __pyx_v_a; } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "Surface_tools.pyx":118 * int i=0, j=0, a * * with nogil: # <<<<<<<<<<<<<< * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L28; } __pyx_L28:; } } /* "Surface_tools.pyx":130 * new_array[j, i, 3] = a * * return pygame.image.frombuffer(new_array, (w, h), 'RGBA') # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_image); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 130, __pyx_L1_error) 
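/* For readability, a minimal pure-Python sketch of the prange loop above
   (an illustrative paraphrase, not the generated code: SCHEDULE and
   THREAD_NUMBER are module constants defined in the .pyx outside this
   excerpt, and a vectorised numpy form stands in for the per-pixel loop):

       import numpy

       def make_transparent_loop_sketch(rgb, alpha, alpha_, w, h):
           # rgb: uint8 array shaped (w, h, 3); alpha: uint8 array shaped (w, h)
           new_array = numpy.empty((h, w, 4), dtype=numpy.uint8)
           # copy RGB while transposing (w, h) -> (h, w)
           new_array[..., :3] = rgb.transpose(1, 0, 2)
           # widen before subtracting so the result can go negative, then
           # clamp at zero exactly as the `if a < 0: a = 0` branch does
           a = alpha.astype(numpy.int32) - alpha_
           new_array[..., 3] = numpy.maximum(a, 0).T.astype(numpy.uint8)
           return new_array

   Note the clamp is one-sided: values above 255 (possible when alpha_ is
   negative) wrap on the uint8 store, in the sketch and in the C loop alike. */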
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_frombuffer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_new_array, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_w); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_h); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_43 = PyTuple_New(2); if (unlikely(!__pyx_t_43)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_43); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_43, 0, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_43, 1, __pyx_t_3); __pyx_t_9 = 0; __pyx_t_3 = 0; __pyx_t_3 = NULL; __pyx_t_17 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_17 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_t_5, __pyx_t_43, __pyx_n_s_RGBA}; __pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_17, 3+__pyx_t_17); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_43); __pyx_t_43 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_t_5, __pyx_t_43, __pyx_n_s_RGBA}; __pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_17, 3+__pyx_t_17); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_43); __pyx_t_43 = 0; } else #endif { __pyx_t_9 = PyTuple_New(3+__pyx_t_17); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __pyx_t_3 = NULL; } __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_17, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_43); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_17, __pyx_t_43); __Pyx_INCREF(__pyx_n_s_RGBA); __Pyx_GIVEREF(__pyx_n_s_RGBA); PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_17, __pyx_n_s_RGBA); __pyx_t_5 = 0; __pyx_t_43 = 0; __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_13; __pyx_t_13 = 0; goto __pyx_L0; /* "Surface_tools.pyx":75 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef make_transparent(image_, int alpha_): # <<<<<<<<<<<<<< * """ * MODIFY TRANSPARENCY TO A PYGAME SURFACE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_13); __PYX_XDEC_MEMVIEW(&__pyx_t_14, 1); 
__PYX_XDEC_MEMVIEW(&__pyx_t_15, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_16, 1); __Pyx_XDECREF(__pyx_t_43); __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_rgb); __Pyx_XDECREF(__pyx_v_alpha); __PYX_XDEC_MEMVIEW(&__pyx_v_new_array, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_alpha_array, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_rgb_array, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_13Surface_tools_make_transparent[] = "\n MODIFY TRANSPARENCY TO A PYGAME SURFACE \n \n :param image_: Surface; pygame.Surface to modify \n :param alpha_: int; integer value representing the new alpha value \n :return: Surface with new alpha value\n "; static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_image_ = 0; int __pyx_v_alpha_; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("make_transparent (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_image_2,&__pyx_n_s_alpha,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_image_2)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_transparent", 1, 2, 2, 1); __PYX_ERR(0, 75, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_transparent") < 0)) __PYX_ERR(0, 75, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_image_ = values[0]; __pyx_v_alpha_ = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_alpha_ == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("make_transparent", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 75, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_13Surface_tools_make_transparent(__pyx_self, __pyx_v_image_, __pyx_v_alpha_); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_13Surface_tools_make_transparent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_image_, int __pyx_v_alpha_) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("make_transparent", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_13Surface_tools_make_transparent(__pyx_v_image_, __pyx_v_alpha_, 0); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "Surface_tools.pyx":137 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_13Surface_tools_reshape(PyObject *__pyx_v_sprite_, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_13Surface_tools_reshape *__pyx_optional_args) { PyObject *__pyx_v_factor_ = ((PyObject *)__pyx_float_1_0); float __pyx_v_f_factor_; PyObject *__pyx_v_t_factor_ = 0; int __pyx_v_i; int __pyx_v_c1; int __pyx_v_c2; PyObject *__pyx_v_sprite_copy = 0; PyObject *__pyx_v_surface = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; float __pyx_t_7; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; Py_ssize_t __pyx_t_13; PyObject *(*__pyx_t_14)(PyObject *); PyObject *__pyx_t_15 = NULL; __Pyx_RefNannySetupContext("reshape", 0); if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_factor_ = __pyx_optional_args->factor_; } } /* "Surface_tools.pyx":150 * tuple t_factor_ * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * # FLOAT OR INT * try: */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":152 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # <<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { /* "Surface_tools.pyx":153 * # FLOAT OR INT * try: * f_factor_ = <float>factor_ # <<<<<<<<<<<<<< * if f_factor_ == 1.0: * return sprite_ */ __pyx_t_7 = __pyx_PyFloat_AsFloat(__pyx_v_factor_); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 153, __pyx_L4_error) __pyx_v_f_factor_ = ((float)__pyx_t_7); /* "Surface_tools.pyx":154 * try: * f_factor_ = <float>factor_ * if f_factor_ == 1.0: # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ __pyx_t_3 = ((__pyx_v_f_factor_ == 1.0) != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":155 * f_factor_ = <float>factor_ * if f_factor_ == 1.0: * return sprite_ # <<<<<<<<<<<<<< * except ValueError: * raise ValueError('\nArgument factor_ must be 
float or int got %s ' % type(factor_)) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_); __pyx_r = __pyx_v_sprite_; goto __pyx_L8_try_return; /* "Surface_tools.pyx":154 * try: * f_factor_ = <float>factor_ * if f_factor_ == 1.0: # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ } /* "Surface_tools.pyx":152 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # <<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ } __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "Surface_tools.pyx":156 * if f_factor_ == 1.0: * return sprite_ * except ValueError: # <<<<<<<<<<<<<< * raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_)) * # TUPLE */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_9, &__pyx_t_10) < 0) __PYX_ERR(0, 156, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_10); /* "Surface_tools.pyx":157 * return sprite_ * except ValueError: * raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_)) # <<<<<<<<<<<<<< * # TUPLE * else: */ __pyx_t_11 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__must_be_float, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 157, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 157, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 157, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 157, __pyx_L6_except_error) } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "Surface_tools.pyx":152 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # <<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L8_try_return:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L0; __pyx_L9_try_end:; } /* "Surface_tools.pyx":150 * tuple t_factor_ * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * # FLOAT OR INT * try: */ goto __pyx_L3; } /* "Surface_tools.pyx":160 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ /*else*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "Surface_tools.pyx":161 * else: * try: * t_factor_ = tuple(factor_) # <<<<<<<<<<<<<< * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ */ __pyx_t_10 = __Pyx_PySequence_Tuple(__pyx_v_factor_); if (unlikely(!__pyx_t_10)) 
__PYX_ERR(0, 161, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_10); __pyx_v_t_factor_ = ((PyObject*)__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":162 * try: * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ __pyx_t_7 = __pyx_PyFloat_AsFloat(PyTuple_GET_ITEM(__pyx_v_t_factor_, 0)); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L13_error) __pyx_t_2 = ((((float)__pyx_t_7) == 0.0) != 0); if (__pyx_t_2) { } else { __pyx_t_3 = __pyx_t_2; goto __pyx_L20_bool_binop_done; } __pyx_t_7 = __pyx_PyFloat_AsFloat(PyTuple_GET_ITEM(__pyx_v_t_factor_, 1)); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L13_error) __pyx_t_2 = ((((float)__pyx_t_7) == 0.0) != 0); __pyx_t_3 = __pyx_t_2; __pyx_L20_bool_binop_done:; if (__pyx_t_3) { /* "Surface_tools.pyx":163 * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ # <<<<<<<<<<<<<< * except ValueError: * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_); __pyx_r = __pyx_v_sprite_; goto __pyx_L17_try_return; /* "Surface_tools.pyx":162 * try: * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ } /* "Surface_tools.pyx":160 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ } __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L18_try_end; __pyx_L13_error:; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":164 * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ * except ValueError: # <<<<<<<<<<<<<< * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) * */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_9, &__pyx_t_1) < 0) __PYX_ERR(0, 164, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_1); /* "Surface_tools.pyx":165 * return sprite_ * except ValueError: * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_11 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__must_be_a_list, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 165, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 165, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 165, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 165, __pyx_L15_except_error) } 
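/* A hedged Python-level paraphrase of reshape()'s overall flow, pieced
   together from the embedded .pyx comments above and below (smoothscale
   comes from pygame.transform in the .pyx imports, outside this excerpt;
   unsupported factor_ types raise ValueError in every branch):

       from pygame.transform import smoothscale

       def _size(factor_, c1, c2):
           # float/int means a uniform scale already folded into (c1, c2);
           # tuple/list means an explicit (width, height)
           if isinstance(factor_, (float, int)):
               return (c1, c2)
           return (factor_[0], factor_[1])

       def reshape_sketch(sprite_, factor_=1.0):
           c1 = c2 = 0
           if isinstance(factor_, (float, int)):
               if float(factor_) == 1.0:
                   return sprite_               # scale 1.0 is a no-op
           else:
               t_factor_ = tuple(factor_)
               if float(t_factor_[0]) == 0.0 and float(t_factor_[1]) == 0.0:
                   return sprite_               # (0, 0) is likewise a no-op
           sprite_copy = sprite_.copy()
           if isinstance(factor_, (float, int)):
               ref = sprite_[0] if isinstance(sprite_, list) else sprite_
               c1 = int(ref.get_width() * factor_)
               c2 = int(ref.get_height() * factor_)
           if isinstance(sprite_copy, list):    # animation: rescale each frame
               for i, surface in enumerate(sprite_copy):
                   sprite_copy[i] = smoothscale(surface, _size(factor_, c1, c2))
           else:                                # single image
               sprite_copy = smoothscale(sprite_copy, _size(factor_, c1, c2))
           return sprite_copy

   One deliberate divergence: the single-image tuple branch below calls
   smoothscale with factor_[0] and factor_[1] as two separate arguments;
   the sketch uses the (w, h) tuple form that pygame's signature expects
   (see the note further down). */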
goto __pyx_L15_except_error; __pyx_L15_except_error:; /* "Surface_tools.pyx":160 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); goto __pyx_L1_error; __pyx_L17_try_return:; __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); goto __pyx_L0; __pyx_L18_try_end:; } } __pyx_L3:; /* "Surface_tools.pyx":168 * * cdef: * int i = 0 # <<<<<<<<<<<<<< * int w, h * int c1, c2 */ __pyx_v_i = 0; /* "Surface_tools.pyx":171 * int w, h * int c1, c2 * sprite_copy = sprite_.copy() # <<<<<<<<<<<<<< * * if PyObject_IsInstance(factor_, (float, int)): */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_copy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_10) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 171, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_sprite_copy = __pyx_t_1; __pyx_t_1 = 0; /* "Surface_tools.pyx":173 * sprite_copy = sprite_.copy() * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":174 * * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): # <<<<<<<<<<<<<< * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) */ __pyx_t_2 = PyObject_IsInstance(__pyx_v_sprite_, ((PyObject *)(&PyList_Type))); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 174, __pyx_L1_error) __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":175 * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) # <<<<<<<<<<<<<< * c2 = <int>(sprite_[i].get_height() * factor_) * else: */ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_sprite_, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_get_width); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 175, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_10))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (__pyx_t_9) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyNumber_Multiply(__pyx_t_1, __pyx_v_factor_); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_10); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_c1 = ((int)__pyx_t_8); /* "Surface_tools.pyx":176 * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) # <<<<<<<<<<<<<< * else: * c1 = <int>(sprite_.get_width() * factor_) */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_sprite_, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_get_height); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_1) { __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 176, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyNumber_Multiply(__pyx_t_10, __pyx_v_factor_); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_c2 = ((int)__pyx_t_8); /* "Surface_tools.pyx":174 * * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): # <<<<<<<<<<<<<< * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) */ goto __pyx_L25; } /* "Surface_tools.pyx":178 * c2 = <int>(sprite_[i].get_height() * factor_) * else: * c1 = <int>(sprite_.get_width() * factor_) # <<<<<<<<<<<<<< * c2 = <int>(sprite_.get_height() * factor_) * */ /*else*/ { __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_get_width); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_10))) { __pyx_t_1 = 
PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (__pyx_t_1) { __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 178, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyNumber_Multiply(__pyx_t_9, __pyx_v_factor_); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_10); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_c1 = ((int)__pyx_t_8); /* "Surface_tools.pyx":179 * else: * c1 = <int>(sprite_.get_width() * factor_) * c2 = <int>(sprite_.get_height() * factor_) # <<<<<<<<<<<<<< * * # ANIMATION */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_get_height); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_1) { __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 179, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyNumber_Multiply(__pyx_t_10, __pyx_v_factor_); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_c2 = ((int)__pyx_t_8); } __pyx_L25:; /* "Surface_tools.pyx":173 * sprite_copy = sprite_.copy() * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) */ } /* "Surface_tools.pyx":182 * * # ANIMATION * if PyObject_IsInstance(sprite_copy, list): # <<<<<<<<<<<<<< * * for surface in sprite_copy: */ __pyx_t_3 = PyObject_IsInstance(__pyx_v_sprite_copy, ((PyObject *)(&PyList_Type))); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 182, __pyx_L1_error) __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":184 * if PyObject_IsInstance(sprite_copy, list): * * for surface in sprite_copy: # <<<<<<<<<<<<<< * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) */ if (likely(PyList_CheckExact(__pyx_v_sprite_copy)) || PyTuple_CheckExact(__pyx_v_sprite_copy)) { __pyx_t_9 = __pyx_v_sprite_copy; __Pyx_INCREF(__pyx_t_9); __pyx_t_13 = 0; __pyx_t_14 = NULL; } else { __pyx_t_13 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_v_sprite_copy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = Py_TYPE(__pyx_t_9)->tp_iternext; if 
(unlikely(!__pyx_t_14)) __PYX_ERR(0, 184, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_14)) { if (likely(PyList_CheckExact(__pyx_t_9))) { if (__pyx_t_13 >= PyList_GET_SIZE(__pyx_t_9)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_10 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_13); __Pyx_INCREF(__pyx_t_10); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 184, __pyx_L1_error) #else __pyx_t_10 = PySequence_ITEM(__pyx_t_9, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); #endif } else { if (__pyx_t_13 >= PyTuple_GET_SIZE(__pyx_t_9)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_10 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_13); __Pyx_INCREF(__pyx_t_10); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 184, __pyx_L1_error) #else __pyx_t_10 = PySequence_ITEM(__pyx_t_9, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); #endif } } else { __pyx_t_10 = __pyx_t_14(__pyx_t_9); if (unlikely(!__pyx_t_10)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 184, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_10); } __Pyx_XDECREF_SET(__pyx_v_surface, __pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":185 * * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_10, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_10); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":186 * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) # <<<<<<<<<<<<<< * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_c1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_c2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_15, 1, __pyx_t_12); __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_12 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_12)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_12); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, 
function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_12, __pyx_v_surface, __pyx_t_15}; __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_12, __pyx_v_surface, __pyx_t_15}; __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_12) { __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_12); __pyx_t_12 = NULL; } __Pyx_INCREF(__pyx_v_surface); __Pyx_GIVEREF(__pyx_v_surface); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_8, __pyx_v_surface); __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, __pyx_t_15); __pyx_t_15 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(__Pyx_SetItemInt(__pyx_v_sprite_copy, __pyx_v_i, __pyx_t_10, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":185 * * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ goto __pyx_L29; } /* "Surface_tools.pyx":187 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: */ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)(&PyTuple_Type))); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_10, 1, ((PyObject *)(&PyList_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_10); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":188 * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) # <<<<<<<<<<<<<< * else: * raise ValueError('\nArgument factor_ incorrect ' */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_GetItemInt(__pyx_v_factor_, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_15 = __Pyx_GetItemInt(__pyx_v_factor_, 1, 
long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_15); __pyx_t_11 = 0; __pyx_t_15 = 0; __pyx_t_15 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_15)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_15); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_15, __pyx_v_surface, __pyx_t_12}; __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_15, __pyx_v_surface, __pyx_t_12}; __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_15) { __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_15); __pyx_t_15 = NULL; } __Pyx_INCREF(__pyx_v_surface); __Pyx_GIVEREF(__pyx_v_surface); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_8, __pyx_v_surface); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, __pyx_t_12); __pyx_t_12 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(__Pyx_SetItemInt(__pyx_v_sprite_copy, __pyx_v_i, __pyx_t_10, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":187 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: */ goto __pyx_L29; } /* "Surface_tools.pyx":190 * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 */ /*else*/ { /* "Surface_tools.pyx":191 * else: * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * i += 1 * */ __pyx_t_10 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__incorrect_type, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); /* "Surface_tools.pyx":190 * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: * raise ValueError('\nArgument factor_ incorrect ' # 
<<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(0, 190, __pyx_L1_error) } __pyx_L29:; /* "Surface_tools.pyx":192 * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 # <<<<<<<<<<<<<< * * # SINGLE IMAGE */ __pyx_v_i = (__pyx_v_i + 1); /* "Surface_tools.pyx":184 * if PyObject_IsInstance(sprite_copy, list): * * for surface in sprite_copy: # <<<<<<<<<<<<<< * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) */ } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":182 * * # ANIMATION * if PyObject_IsInstance(sprite_copy, list): # <<<<<<<<<<<<<< * * for surface in sprite_copy: */ goto __pyx_L26; } /* "Surface_tools.pyx":196 * # SINGLE IMAGE * else: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ /*else*/ { __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_9); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":197 * else: * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) # <<<<<<<<<<<<<< * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) */ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_c1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_c2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_11); __pyx_t_1 = 0; __pyx_t_11 = 0; __pyx_t_11 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_11, 
__pyx_v_sprite_copy, __pyx_t_12}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_12}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_1 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__pyx_t_11) { __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_11); __pyx_t_11 = NULL; } __Pyx_INCREF(__pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_v_sprite_copy); PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_8, __pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_8, __pyx_t_12); __pyx_t_12 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_sprite_copy, __pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":196 * # SINGLE IMAGE * else: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ goto __pyx_L30; } /* "Surface_tools.pyx":198 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: */ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)(&PyTuple_Type))); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)(&PyList_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_9); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":199 * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) # <<<<<<<<<<<<<< * else: * raise ValueError('\nArgument factor_ incorrect ' */ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_factor_, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_factor_, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_11 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_11 = 
PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[4] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_1, __pyx_t_12}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[4] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_1, __pyx_t_12}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_15 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); if (__pyx_t_11) { __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_11); __pyx_t_11 = NULL; } __Pyx_INCREF(__pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_v_sprite_copy); PyTuple_SET_ITEM(__pyx_t_15, 0+__pyx_t_8, __pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_15, 1+__pyx_t_8, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_15, 2+__pyx_t_8, __pyx_t_12); __pyx_t_1 = 0; __pyx_t_12 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_15, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_sprite_copy, __pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":198 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: */ goto __pyx_L30; } /* "Surface_tools.pyx":201 * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * */ /*else*/ { /* "Surface_tools.pyx":202 * else: * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * * return sprite_copy */ __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__incorrect_type, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "Surface_tools.pyx":201 * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * */ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); 
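/* Note on the branch above ("Surface_tools.pyx":199): unlike the animation
   branch at line 188, it passes factor_[0] and factor_[1] to smoothscale as
   two separate positional arguments. pygame.transform.smoothscale takes the
   new size as a single (w, h) tuple (its optional third argument is a
   destination surface), so this call as written would raise TypeError at
   runtime; the tuple form used elsewhere is presumably what was intended. */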
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(0, 201, __pyx_L1_error) } __pyx_L30:; } __pyx_L26:; /* "Surface_tools.pyx":204 * 'type must be float, int or tuple got %s ' % type(factor_)) * * return sprite_copy # <<<<<<<<<<<<<< * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_copy); __pyx_r = __pyx_v_sprite_copy; goto __pyx_L0; /* "Surface_tools.pyx":137 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_15); __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_t_factor_); __Pyx_XDECREF(__pyx_v_sprite_copy); __Pyx_XDECREF(__pyx_v_surface); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_13Surface_tools_2reshape[] = "\n RESHAPE ANIMATION OR IMAGE \n \n :param sprite_: list, image; list containing the surface to rescale\n :param factor_: float, int or tuple; Represent the scale factor (new size)\n :return: return animation or a single image (rescale) \n "; static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_sprite_ = 0; PyObject *__pyx_v_factor_ = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("reshape (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sprite,&__pyx_n_s_factor,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_float_1_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sprite)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_factor); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "reshape") < 0)) __PYX_ERR(0, 137, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_sprite_ = values[0]; __pyx_v_factor_ = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("reshape", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 137, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_13Surface_tools_2reshape(__pyx_self, __pyx_v_sprite_, __pyx_v_factor_); /* 
function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_13Surface_tools_2reshape(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sprite_, PyObject *__pyx_v_factor_) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; struct __pyx_opt_args_13Surface_tools_reshape __pyx_t_2; __Pyx_RefNannySetupContext("reshape", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_2.__pyx_n = 1; __pyx_t_2.factor_ = __pyx_v_factor_; __pyx_t_1 = __pyx_f_13Surface_tools_reshape(__pyx_v_sprite_, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: 
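/* Positional-argument unpacking: Cython emits a descending switch in which
 * each case copies one positional argument into values[] and falls through
 * (CYTHON_FALLTHROUGH) to the next lower case, filling the array from the
 * highest index down. Arities below the three required arguments (shape,
 * itemsize, format) reach the default label and report an invalid argument
 * tuple via __Pyx_RaiseArgtupleInvalid. */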
values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * 
if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * 
self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 139, __pyx_L1_error) } __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # 
<<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = 
allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 
187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 
* info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto 
__pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":229 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * 
def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":232 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":235 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":238 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
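/* cython.array deliberately opts out of pickling: as the error message below
 * states, its __cinit__ is non-trivial (it sets up C-level state, a malloc'ed
 * data buffer plus shape/stride arrays, that the default __reduce__ protocol
 * cannot capture), so __reduce_cython__ and __setstate_cython__ both simply
 * raise TypeError. */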
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":247 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":250 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 250, __pyx_L1_error) 
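/* Wrapping an existing buffer: allocate_buffer=False is passed through this
 * one-entry kwargs dict, so __cinit__ skips the malloc and leaves free_data
 * False; once result.data is pointed at buf below, __dealloc__ will not
 * attempt to free the caller-owned memory. */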
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 250, __pyx_L1_error) /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":253 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 279, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 279, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct 
__pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":280 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":282 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { int __pyx_v_use_setstate; PyObject *__pyx_v_state = NULL; PyObject *__pyx_v__dict = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":3 * def __reduce_cython__(self): * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ 
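/* Pickle support for Enum: the state tuple starts as (self.name,) and is
   extended with the instance __dict__ when one exists. The magic number
   0xb068931 (decimal 184977713, interned as __pyx_int_184977713) is a layout
   checksum that __pyx_unpickle_Enum verifies before applying state produced
   by a potentially incompatible build of this class. */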
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":4 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":6 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":7 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":9 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":11 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
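/* Pack (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state): passing
   None as the reconstructor's state argument and returning `state` as the
   third element of the reduce tuple makes pickle invoke __setstate_cython__
   on the freshly created object, which is required once __dict__ is part of
   the state. */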
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":13 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * 
def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 15, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":298 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":302 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":305 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":307 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; 
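/* Argument unpacking for memoryview.__cinit__(obj, flags, dtype_is_object=False):
   both positional and keyword calls are accepted; `flags` is coerced to a
   C int, `dtype_is_object` to a C bint defaulting to 0, and any arity
   mismatch raises TypeError via __Pyx_RaiseArgtupleInvalid. */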
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 343, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 343, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 343, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":344 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":345 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if 
type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":347 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 347, __pyx_L1_error) /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":349 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":350 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":353 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":354 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":355 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":353 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 359, __pyx_L1_error) /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":362 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":364 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":366 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":368 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * 
self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":372 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used 
- 1); /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":382 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":381 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":383 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":385 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; 
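/* get_item_pointer mirrors `for dim, idx in enumerate(index)`: each idx is
   converted to Py_ssize_t and pybuffer_index (defined elsewhere in this
   file) advances itemp along that dimension; bounds checking and suboffset
   handling live inside pybuffer_index. */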
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":389 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 391, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 391, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":392 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":394 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t 
dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":399 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":401 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 401, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 401, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = 
__pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 404, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":405 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":407 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 407, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":408 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); 
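/* __setitem__ dispatch: _unellipsify expands any Ellipsis in `index` and
   reports whether slices are present. Slice targets are filled either from
   another buffer-exporting object (setitem_slice_assignment) or broadcast
   from a scalar (setitem_slice_assign_scalar); a purely integral index
   falls through to setitem_indexed, which assigns a single item. */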
__Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":411 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 411, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 415, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":416 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":418 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 418, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":420 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * try: * 
obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 426, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":427 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 427, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":428 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":430 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef 
is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":437 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 437, __pyx_L1_error) /* "View.MemoryView":438 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":442 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":447 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":452 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 452, __pyx_L1_error) /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":453 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":455 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":457 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * 
(<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":459 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":461 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 461, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":466 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":467 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":470 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); 
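/* Editor's note (hedged, not generated code): this branch is the
 * exception path of the Cython try/finally quoted above. The live
 * exception is parked in __pyx_t_6..__pyx_t_11, PyMem_Free(tmp) runs,
 * and the parked state is handed back below so the original error
 * still propagates. The C API idiom it elaborates is essentially:
 *
 *     PyObject *t, *v, *tb;
 *     PyErr_Fetch(&t, &v, &tb);   // park the live exception
 *     PyMem_Free(tmp);            // run the finally body
 *     PyErr_Restore(t, v, tb);    // re-raise on the way out
 */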
__Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":473 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 473, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":474 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject 
*__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":479 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":482 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":484 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); 
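/* Editor's note (a sketch, not the generated code): minus the
 * CYTHON_FAST_PYCALL / CYTHON_FAST_PYCCALL specializations, the
 * dispatch around this point reduces to the plain C API sequence
 *
 *     PyObject *fmt = PyBytes_FromString(self->view.format);
 *     PyObject *res = PyObject_CallFunctionObjArgs(unpack, fmt,
 *                                                  bytesitem, NULL);
 *
 * i.e. the Cython-level `result = struct.unpack(self.view.format,
 * bytesitem)` quoted in the comment above (error checks elided here).
 */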
__pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":489 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":490 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":485 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 486, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 486, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); 
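/* Editor's note: this is the return-from-inside-try path. The exception
 * state saved at try entry (__pyx_t_2..__pyx_t_4) is given back to the
 * thread state below before returning, so the caller's notion of the
 * "currently handled exception" is unchanged. Roughly (a hedged sketch
 * with hypothetical saved_* names; PyErr_SetExcInfo steals its
 * references):
 *
 *     PyErr_SetExcInfo(saved_type, saved_value, saved_tb);
 *     return result;
 */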
__Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":495 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":501 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 503, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 505, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = 
PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":511 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* 
"View.MemoryView":513 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":516 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":518 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":521 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":523 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":526 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":528 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":530 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":531 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":532 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":533 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":534 * info.itemsize = 
self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":535 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":542 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 542, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 542, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":543 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 543, __pyx_L1_error) /* "View.MemoryView":544 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); 
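/* Editor's note: the `T` property above copies the memoryview, then
 * transpose_memslice reverses the copy's shape/strides in place. A
 * minimal sketch of that reversal, assuming a hypothetical
 * __Pyx_memviewslice `s`:
 *
 *     for (int i = 0, j = s.memview->view.ndim - 1; i < j; i++, j--) {
 *         Py_ssize_t t;
 *         t = s.shape[i];   s.shape[i]   = s.shape[j];   s.shape[j]   = t;
 *         t = s.strides[i]; s.strides[i] = s.strides[j]; s.strides[j] = t;
 *     }
 */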
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":548 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":552 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; 
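/* Editor's note: `shape` above materializes self.view.shape[:ndim] as a
 * Python tuple (a list comprehension followed by PyList_AsTuple). A
 * compact C API equivalent, as a sketch with error checks elided and
 * `mv` a hypothetical struct __pyx_memoryview_obj *:
 *
 *     PyObject *shape = PyTuple_New(mv->view.ndim);
 *     for (int i = 0; i < mv->view.ndim; i++)
 *         PyTuple_SET_ITEM(shape, i,
 *                          PyLong_FromSsize_t(mv->view.shape[i]));
 */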
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 558, __pyx_L1_error) /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":560 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__16, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":567 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
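/* Editor's note: unlike `strides`, which raises ValueError when the
 * buffer exposes no stride table, `suboffsets` above falls back to
 * (-1,) * ndim -- the PEP 3118 marker for "every dimension is direct".
 * A quick C-level way to observe the same convention (sketch):
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) == 0) {
 *         int all_direct = (view.suboffsets == NULL);  // == (-1,)*ndim
 *         PyBuffer_Release(&view);
 *     }
 */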
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":571 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 571, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":575 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* 
Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":584 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":586 * result = 1 * * for length in 
self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":587 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":589 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":591 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":595 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":597 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; 
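/* Editor's note: `__len__` above returns the extent of the leading
 * dimension, falling back to 0 for a 0-d view. As a one-line sketch
 * (`mv` hypothetical):
 *
 *     Py_ssize_t n = (mv->view.ndim >= 1) ? mv->view.shape[0] : 0;
 */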
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":601 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":604 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* 
"View.MemoryView":610 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":611 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":616 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":617 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * return 
slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":621 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":623 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":624 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 624, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct 
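/* [annotation, not part of the generated output] The wrapper below
   downcasts the generic PyObject pointer to the concrete memoryview
   struct. copy() and copy_fortran() are symmetric: each masks the
   opposite PyBUF_x_CONTIGUOUS bit out of self.flags and asks the
   copy_new_contig() helper for a 'c'- or 'fortran'-packed duplicate
   of the underlying data. */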
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":633 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":636 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no 
default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 
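/* [annotation, not part of the generated output] __pyx_t_1..__pyx_t_3
   are scratch references: they box the C int/bint arguments and build
   the (o, flags, dtype_is_object) tuple passed to the memoryview
   constructor call below; typeinfo is attached afterwards at the C
   level because the Python-visible constructor does not accept it. */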
= NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":646 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":647 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":648 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":652 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple 
_unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":660 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 660, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":662 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":664 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 664, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":665 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":666 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 667, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= 
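/* [annotation, not part of the generated output] enumerate(tup) is
   lowered to a fast path for exact lists/tuples (direct GET_ITEM
   macros plus a manual counter), with a generic tp_iternext fallback
   for any other iterable in the else branch below. */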
PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 667, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 
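/* clamp: a negative repeat count yields an empty list, matching
   Python's [slice(None)] * n semantics for n < 0 (annotation, not
   part of the generated output) */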
0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__19); __Pyx_GIVEREF(__pyx_slice__19); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__19); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":671 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__20); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 673, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":674 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":677 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 677, __pyx_L1_error) /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":679 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if 
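/* [annotation, not part of the generated output] short-circuit
   lowering of "have_slices = have_slices or isinstance(item, slice)":
   the PySlice_Check only runs while have_slices is still false */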
(!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":680 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 680, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":682 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__21); __Pyx_GIVEREF(__pyx_slice__21); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__21); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":686 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit 
code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":689 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 691, __pyx_L1_error) /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; 
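/* [annotation, not part of the generated output] memview_slice()
   drives the slicing pipeline: it aliases or copies the source slice
   header, then walks the index tuple (already expanded by
   _unellipsify) one dimension at a time -- an integer index drops a
   dimension, None inserts a new length-1 axis with stride 0, and a
   slice object is normalised into the destination header by
   slice_memviewslice() below. */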
int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":699 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":706 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":710 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 710, __pyx_L1_error) } } #endif /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":713 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 713, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":714 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":716 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":717 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":723 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":724 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":729 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = 
&suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":730 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 734, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 734, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":739 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 739, __pyx_L1_error) /* "View.MemoryView":736 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 736, 
__pyx_L1_error) /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":743 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":744 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":745 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":746 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":748 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":749 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 749, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 749, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":750 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 750, __pyx_L1_error) if 
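/* [annotation, not part of the generated output] "index.step or 0"
   compiles to a truthiness test with a 0 fallback, so the value alone
   cannot distinguish an absent bound from an explicit 0; that is why
   have_start/have_stop/have_step are derived separately from
   "is not None" just below. */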
(!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 750, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":752 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":753 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":754 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 754, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":756 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 756, __pyx_L1_error) /* "View.MemoryView":762 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":766 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if 
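/* [annotation, not part of the generated output] memviewsliceobj is
   only bound on the _memoryviewslice branch of the type check above,
   so Cython guards each attribute read on this return path with an
   unbound-local check. */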
(unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 766, __pyx_L1_error) } /* "View.MemoryView":767 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 767, __pyx_L1_error) } /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 765, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":771 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 770, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int 
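/* [annotation, not part of the generated output] the slice branch
   below normalises start/stop/step much like CPython's
   PySlice_GetIndicesEx: negative bounds wrap once by +shape, are then
   clamped into the valid range for the step direction, and the new
   extent is ceil((stop - start) / step) floored at zero; e.g. with
   shape == 5, start == -2 wraps to 3. */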
__pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":818 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":820 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 820, __pyx_L1_error) /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":823 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":826 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 826, __pyx_L1_error) /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = 
(__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":836 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":838 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":843 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # 
<<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":851 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":856 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":863 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":869 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":872 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":873 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":874 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * 
if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":880 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":885 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":887 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":888 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 887, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":890 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":892 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", 
__pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":900 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":901 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":905 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 905, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 905, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":906 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":908 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":909 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":911 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = 
view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":916 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 916, __pyx_L1_error) /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 919, __pyx_L1_error) /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index 
>= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":925 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":932 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":934 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":935 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":939 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":940 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":941 * for i in range(ndim 
/ 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":942 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":945 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L1_error) /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":947 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":965 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef 
convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":969 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":971 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 971, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":975 * cdef 
assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 975, __pyx_L1_error) /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":977 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":981 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
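/* Note: the __reduce_cython__/__setstate_cython__ pair entered here exists
   only to disable pickling. _memoryviewslice has a non-trivial __cinit__ and
   cannot be reconstructed from plain state, so both methods unconditionally
   raise TypeError, as the "(tree fragment)" source echoed below shows. */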
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
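/* Generated error path: the temporary holding the raised TypeError has just
   been released; a traceback frame attributing the failure to
   View.MemoryView._memoryviewslice.__setstate_cython__ is recorded next,
   before the function returns NULL. */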
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":996 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1001 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1003 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1004 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1006 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); 
__Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1007 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1009 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1010 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1011 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1012 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1013 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1015 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1017 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1018 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1021 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1022 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * for suboffset in result.from_slice.suboffsets[:ndim]: * if 
suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1025 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1027 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1028 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1029 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1031 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1032 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1034 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1041 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1041, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1042 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1044 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1045 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1052 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = 
__pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1053 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1054 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1056 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1057 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1059 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1060 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1061 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1062 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1068 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1069 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') 
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1080 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1081 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1083 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1084 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1086 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1088 * 
return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1096 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1098 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1106 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1107 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1109 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1111 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1112 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1114 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1116 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1117 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1120 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1122 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1132 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1133 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1134 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1135 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1139 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1140 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ 
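/* Editor's note: fast path. When both strides equal the itemsize, the 1-D
   slice is contiguous, so the call below copies the whole extent with one
   memcpy instead of dst_extent element-sized copies. (Illustrative: for a
   contiguous slice of 100 doubles, stride == itemsize == 8, and 800 bytes
   move in a single call.) */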
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1142 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1143 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1144 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1145 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1147 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1148 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1152 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1153 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1158 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, 
dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1165 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1167 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1168 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1170 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1182 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1183 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1184 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1185 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1182 * 
cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1187 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1188 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1189 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1191 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; /* "View.MemoryView":1205 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1206 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1208 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1210 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1210, __pyx_L1_error) /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1213 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1214 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = 
__pyx_t_4; /* "View.MemoryView":1215 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1216 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1217 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1219 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1223 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1225 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1228 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1230 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1232 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
__Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1240 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1239 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 1239, __pyx_L1_error) /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') 
% dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1244 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1244, __pyx_L1_error) /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
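/* Editor's note: _err_dim always raises, so control always reaches this
   error exit. The temporaries were released above; below, the traceback is
   recorded, -1 (the `except -1` sentinel) is returned, and the GIL state
   acquired on entry is released. */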
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1249 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1249, __pyx_L1_error) /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) 
except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1251 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1251, __pyx_L1_error) } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1262 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1263 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1265 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1266 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1267 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * 
broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1273 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1275 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1277 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1280 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1281 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1283 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1283, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1286 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = 
__pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1286, __pyx_L1_error) /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1293 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == ((void *)NULL))) __PYX_ERR(1, 1293, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1294 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = 
slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1306 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1307 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1309 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1310 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1315 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1315, __pyx_L1_error) /* "View.MemoryView":1316 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1316, __pyx_L1_error) /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1318 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* 
"View.MemoryView":1319 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1323 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1330 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1332 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1333 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1334 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1335 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1337 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = 
mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1338 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1339 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1340 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1353 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1360 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif 
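/* Editor's note: end of the GIL-holding wrapper; it exists so that nogil
   copying code can adjust Python reference counts safely by delegating to
   the recursive helper defined next. */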
} /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1367 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1370 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1372 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1374 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1375 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1377 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1386 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) 
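 * (Editor's note: refcounting on the destination is switched off before the
 * raw fill and switched back on afterwards, mirroring the direct-copy path
 * in memoryview_copy_contents.)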
*/ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1387 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1389 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1397 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1401 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1402 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1403 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1405 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1406 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ 
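/* Editor's note: for ndim > 1 the scalar fill recurses one dimension at a
   time. Each call below advances shape and strides by one entry and reduces
   ndim by one, while the enclosing loop steps the data pointer by this
   dimension's stride. */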
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1408 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) 
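/* Editor's note: every argument-parsing failure in the __pyx_unpickle_Enum
   wrapper jumps here; the label below records a traceback and returns NULL
   to propagate the exception. */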
__pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = NULL; PyObject *__pyx_v___pyx_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":3 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":4 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_5) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":5 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); 
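/* Editor's note: the CYTHON_FAST_PYCALL / CYTHON_FAST_PYCCALL branches
   surrounding this point are Cython's call-dispatch fast path for
   Enum.__new__(__pyx_type).  Every branch is behaviourally equivalent
   to a plain C-API call such as

       result = PyObject_CallFunctionObjArgs(new_method, type_arg, NULL);

   (hypothetical names); the fast paths only avoid materialising the
   intermediate argument tuple, which this fallback branch builds
   explicitly. */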
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v___pyx_type); __Pyx_GIVEREF(__pyx_v___pyx_type); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v___pyx_type); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_7 = (__pyx_t_1 != 0); if (__pyx_t_7) { /* "(tree fragment)":7 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 7, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":8 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; 
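/* Editor's note: __Pyx_RefNannyDeclarations and the paired
   __Pyx_GOTREF/__Pyx_GIVEREF calls used throughout these functions are
   reference-count auditing hooks; unless the module is built with
   CYTHON_REFNANNY defined they expand to nothing, leaving only the
   real Py_INCREF/Py_DECREF traffic. */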
int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":10 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 10, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 11, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":12 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int 
__pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) 
PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if 
(unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", 
(PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject 
*__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, 
/*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {"make_transparent", (PyCFunction)__pyx_pw_13Surface_tools_1make_transparent, METH_VARARGS|METH_KEYWORDS, __pyx_doc_13Surface_tools_make_transparent}, {"reshape", (PyCFunction)__pyx_pw_13Surface_tools_3reshape, METH_VARARGS|METH_KEYWORDS, __pyx_doc_13Surface_tools_2reshape}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_Surface_tools(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_Surface_tools}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "Surface_tools", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Argument_factor__incorrect_type, __pyx_k_Argument_factor__incorrect_type, sizeof(__pyx_k_Argument_factor__incorrect_type), 0, 0, 1, 0}, {&__pyx_kp_s_Argument_factor__must_be_a_list, __pyx_k_Argument_factor__must_be_a_list, sizeof(__pyx_k_Argument_factor__must_be_a_list), 0, 0, 1, 0}, {&__pyx_kp_s_Argument_factor__must_be_float, __pyx_k_Argument_factor__must_be_float, sizeof(__pyx_k_Argument_factor__must_be_float), 0, 0, 1, 0}, {&__pyx_kp_s_Array_shape_not_understood, __pyx_k_Array_shape_not_understood, sizeof(__pyx_k_Array_shape_not_understood), 0, 0, 1, 0}, {&__pyx_n_s_BLEND_RGB_ADD, __pyx_k_BLEND_RGB_ADD, sizeof(__pyx_k_BLEND_RGB_ADD), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Expecting_Surface_for_positional, __pyx_k_Expecting_Surface_for_positional, sizeof(__pyx_k_Expecting_Surface_for_positional), 0, 0, 1, 0}, {&__pyx_kp_s_Expecting_int_for_positional_arg, __pyx_k_Expecting_int_for_positional_arg, sizeof(__pyx_k_Expecting_int_for_positional_arg), 0, 0, 1, 0}, {&__pyx_n_s_HWACCEL, __pyx_k_HWACCEL, sizeof(__pyx_k_HWACCEL), 0, 0, 1, 1}, {&__pyx_kp_s_Image_with_incorrect_shape_must, __pyx_k_Image_with_incorrect_shape_must, sizeof(__pyx_k_Image_with_incorrect_shape_must), 0, 0, 1, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, 
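/* Editor's note: each __Pyx_StringTabEntry in the table below is
   {slot, bytes, byte-length, encoding, is_unicode, is_str, intern}:
   __pyx_n_s_* entries are interned identifier strings (final field 1),
   while __pyx_kp_s_* entries are plain, non-interned constant strings
   (final field 0).  __Pyx_InitStrings() walks this table once at
   module start-up and fills every slot. */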
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_surface, __pyx_k_Invalid_surface, sizeof(__pyx_k_Invalid_surface), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_kp_s_Pygame_library_is_missing_on_yo, __pyx_k_Pygame_library_is_missing_on_yo, sizeof(__pyx_k_Pygame_library_is_missing_on_yo), 0, 0, 1, 0}, {&__pyx_n_s_RGBA, __pyx_k_RGBA, sizeof(__pyx_k_RGBA), 0, 0, 1, 1}, {&__pyx_n_s_RLEACCEL, __pyx_k_RLEACCEL, sizeof(__pyx_k_RLEACCEL), 0, 0, 1, 1}, {&__pyx_n_s_Rect, __pyx_k_Rect, sizeof(__pyx_k_Rect), 0, 0, 1, 1}, {&__pyx_n_s_SRCALPHA, __pyx_k_SRCALPHA, sizeof(__pyx_k_SRCALPHA), 0, 0, 1, 1}, {&__pyx_n_s_Surface, __pyx_k_Surface, sizeof(__pyx_k_Surface), 0, 0, 1, 1}, {&__pyx_kp_s_Surface_without_per_pixel_infor, __pyx_k_Surface_without_per_pixel_infor, sizeof(__pyx_k_Surface_without_per_pixel_infor), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_Vector2, __pyx_k_Vector2, sizeof(__pyx_k_Vector2), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_array3d, __pyx_k_array3d, sizeof(__pyx_k_array3d), 0, 0, 1, 1}, {&__pyx_n_s_array_alpha, __pyx_k_array_alpha, sizeof(__pyx_k_array_alpha), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, 
sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_factor, __pyx_k_factor, sizeof(__pyx_k_factor), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_frombuffer, __pyx_k_frombuffer, sizeof(__pyx_k_frombuffer), 0, 0, 1, 1}, {&__pyx_n_s_get_bitsize, __pyx_k_get_bitsize, sizeof(__pyx_k_get_bitsize), 0, 0, 1, 1}, {&__pyx_n_s_get_height, __pyx_k_get_height, sizeof(__pyx_k_get_height), 0, 0, 1, 1}, {&__pyx_n_s_get_size, __pyx_k_get_size, sizeof(__pyx_k_get_size), 0, 0, 1, 1}, {&__pyx_n_s_get_width, __pyx_k_get_width, sizeof(__pyx_k_get_width), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1}, {&__pyx_n_s_image_2, __pyx_k_image_2, sizeof(__pyx_k_image_2), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_kp_s_invalid_value_for_argument_alph, __pyx_k_invalid_value_for_argument_alph, sizeof(__pyx_k_invalid_value_for_argument_alph), 0, 0, 1, 0}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pixels3d, __pyx_k_pixels3d, sizeof(__pyx_k_pixels3d), 0, 0, 1, 1}, {&__pyx_n_s_pixels_alpha, __pyx_k_pixels_alpha, sizeof(__pyx_k_pixels_alpha), 0, 0, 1, 1}, {&__pyx_n_s_pygame, __pyx_k_pygame, sizeof(__pyx_k_pygame), 0, 0, 1, 1}, 
{&__pyx_n_s_pygame_math, __pyx_k_pygame_math, sizeof(__pyx_k_pygame_math), 0, 0, 1, 1}, {&__pyx_n_s_pygame_surfarray, __pyx_k_pygame_surfarray, sizeof(__pyx_k_pygame_surfarray), 0, 0, 1, 1}, {&__pyx_n_s_pygame_transform, __pyx_k_pygame_transform, sizeof(__pyx_k_pygame_transform), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_rotate, __pyx_k_rotate, sizeof(__pyx_k_rotate), 0, 0, 1, 1}, {&__pyx_n_s_scale, __pyx_k_scale, sizeof(__pyx_k_scale), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_smoothscale, __pyx_k_smoothscale, sizeof(__pyx_k_smoothscale), 0, 0, 1, 1}, {&__pyx_n_s_sprite, __pyx_k_sprite, sizeof(__pyx_k_sprite), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_uint8, __pyx_k_uint8, sizeof(__pyx_k_uint8), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if 
(!__pyx_builtin_ValueError) __PYX_ERR(0, 54, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 63, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 93, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 601, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 820, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "Surface_tools.pyx":53 * cdef int width, height * try: * width, height = (<object> rgb_array_c).shape[:2] # <<<<<<<<<<<<<< * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') */ __pyx_slice_ = PySlice_New(Py_None, __pyx_int_2, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 53, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); /* "Surface_tools.pyx":55 * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') # <<<<<<<<<<<<<< * * cdef: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Array_shape_not_understood); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "Surface_tools.pyx":93 * * if not image_.get_bitsize() == 32: * raise TypeError("\nSurface without per-pixel information.") # <<<<<<<<<<<<<< * * try: */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Surface_without_per_pixel_infor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "Surface_tools.pyx":98 * rgb = pixels3d(image_) * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') # <<<<<<<<<<<<<< * * try: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Invalid_surface); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "Surface_tools.pyx":103 * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') # <<<<<<<<<<<<<< * * cdef int w, h */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Surface_without_per_pixel_infor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ 
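/* Editor's note: every __pyx_tuple__N built in __Pyx_InitCachedConstants
   is a pre-packed, module-lifetime argument tuple for one specific
   raise site, so raising costs no extra allocation.  A hand-written
   equivalent of the pattern (sketch only, hypothetical names):

       static PyObject *msg_args;   // created once at module init

       static int init_cached(void)
       {
           msg_args = Py_BuildValue("(s)", "itemsize <= 0 for cython.array");
           return msg_args ? 0 : -1;
       }

       // at the raise site (error handling elided):
       //   PyObject *exc = PyObject_Call(PyExc_ValueError, msg_args, NULL);
       //   if (exc) { PyErr_SetObject(PyExc_ValueError, exc); Py_DECREF(exc); }
*/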
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 486, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* 
"View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__16 = PyTuple_New(1); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__16, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__19 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__19)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__19); __Pyx_GIVEREF(__pyx_slice__19); /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__20 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__20)) __PYX_ERR(1, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__20); __Pyx_GIVEREF(__pyx_slice__20); /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__21 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__21)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__21); __Pyx_GIVEREF(__pyx_slice__21); /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ 
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "Surface_tools.pyx":32 * * except ImportError: * raise ImportError("\n<Pygame> library is missing on your system." # <<<<<<<<<<<<<< * "\nTry: \n C:\\pip install pygame on a window command prompt.") * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_Pygame_library_is_missing_on_yo); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* "View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_tuple__31 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int 
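/*
 * NOTE: the function that follows interns the module's string table and
 * pre-creates the numeric constants the generated code uses (1.0, 0, 1, 2,
 * 4, 32, -1, and 184977713 == 0xb068931, the checksum tested by
 * __pyx_unpickle_Enum), so no PyLong/PyFloat allocation happens at the use
 * sites later in the file.
 */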
__Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_32 = PyInt_FromLong(32); if (unlikely(!__pyx_int_32)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initSurface_tools(void); /*proto*/ PyMODINIT_FUNC initSurface_tools(void) #else PyMODINIT_FUNC PyInit_Surface_tools(void); /*proto*/ PyMODINIT_FUNC PyInit_Surface_tools(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_Surface_tools(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; static PyThread_type_lock __pyx_t_9[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_Surface_tools(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if 
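/*
 * NOTE: under CYTHON_PEP489_MULTI_PHASE_INIT the module is created in two
 * phases (PEP 489): PyInit_Surface_tools merely returns
 * PyModuleDef_Init(&__pyx_moduledef); __pyx_pymod_create above builds the
 * module object and copies the import-spec attributes across
 * (loader -> __loader__, origin -> __file__, parent -> __package__,
 * submodule_search_locations -> __path__); and the body of this function
 * then runs as the exec slot.
 */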
(unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("Surface_tools", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_Surface_tools) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "Surface_tools")) { if (unlikely(PyDict_SetItemString(modules, "Surface_tools", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_memoryview_type = 
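/*
 * NOTE: each extension type is finished with PyType_Ready and then handed
 * its C-level vtable. __Pyx_SetVtable stores the struct of function
 * pointers in tp_dict (conventionally as a capsule under "__pyx_vtable__";
 * that key name is Cython's usual scheme, stated here as background rather
 * than read off this file), which lets the _memoryviewslice subclass below
 * copy the base table and override single entries:
 *
 *     __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
 *     // ...then swap in its own convert_item_to_object and
 *     // assign_item_from_object, as the code below does.
 */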
&__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) __PYX_ERR(3, 8, __pyx_L1_error) __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) __PYX_ERR(4, 15, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "Surface_tools.pyx":4 * ###cython: boundscheck=False, wraparound=False, nonecheck=False, optimize.use_switch=True * * try: # <<<<<<<<<<<<<< * cimport cython * from cython.parallel cimport prange */ { (void)__pyx_t_1; (void)__pyx_t_2; (void)__pyx_t_3; /* mark used */ /*try:*/ { /* "Surface_tools.pyx":13 * PyDict_Values, PyDict_Keys, PyDict_Items * from cpython.list cimport PyList_Append, PyList_GetItem, PyList_Size, PyList_SetItem * from cpython.object cimport PyObject_SetAttr # <<<<<<<<<<<<<< * * except ImportError: */ } } /* "Surface_tools.pyx":19 * "\nTry: \n C:\\pip install cython on a window command prompt.") * * import numpy # <<<<<<<<<<<<<< * from numpy import empty, uint8, asarray * */ __pyx_t_4 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_4) < 0) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":20 * * import numpy * from numpy import empty, uint8, asarray # <<<<<<<<<<<<<< * * try: */ __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_empty); __Pyx_GIVEREF(__pyx_n_s_empty); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_empty); __Pyx_INCREF(__pyx_n_s_uint8); __Pyx_GIVEREF(__pyx_n_s_uint8); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_uint8); __Pyx_INCREF(__pyx_n_s_asarray); __Pyx_GIVEREF(__pyx_n_s_asarray); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_asarray); __pyx_t_5 = __Pyx_Import(__pyx_n_s_numpy, __pyx_t_4, -1); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_empty, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_uint8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_uint8, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_asarray, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_1); /*try:*/ { /* "Surface_tools.pyx":23 * * try: * import pygame # <<<<<<<<<<<<<< * from pygame import Rect * from pygame.math import Vector2 */ __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame, 0, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pygame, __pyx_t_5) < 0) __PYX_ERR(0, 23, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":24 * try: * import pygame * from pygame import Rect # <<<<<<<<<<<<<< * from pygame.math import Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL */ __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_Rect); __Pyx_GIVEREF(__pyx_n_s_Rect); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_Rect); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_Rect); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Rect, __pyx_t_5) < 0) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":25 * import pygame * from pygame import Rect * from pygame.math import Vector2 # <<<<<<<<<<<<<< * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL */ __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_Vector2); __Pyx_GIVEREF(__pyx_n_s_Vector2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_Vector2); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame_math, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_Vector2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Vector2, __pyx_t_4) < 0) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
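/*
 * NOTE: every `from pygame import ...` line in Surface_tools.pyx lowers to
 * the same three-step pattern repeated above and below: build a fromlist,
 * import the module, then fetch each attribute and publish it in the module
 * dict. A stripped-down sketch of one round trip (hypothetical names, no
 * refnanny or error handling):
 *
 *     PyObject *fromlist = PyList_New(1);
 *     Py_INCREF(name_Rect);
 *     PyList_SET_ITEM(fromlist, 0, name_Rect);          // steals the ref
 *     PyObject *mod = __Pyx_Import(name_pygame, fromlist, -1);
 *     PyObject *obj = __Pyx_ImportFrom(mod, name_Rect); // GetAttr with a
 *                                                       // nicer ImportError
 *     PyDict_SetItem(__pyx_d, name_Rect, obj);          // module-level Rect
 */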
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":26 * from pygame import Rect * from pygame.math import Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL # <<<<<<<<<<<<<< * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale */ __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_Rect); __Pyx_GIVEREF(__pyx_n_s_Rect); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_Rect); __Pyx_INCREF(__pyx_n_s_BLEND_RGB_ADD); __Pyx_GIVEREF(__pyx_n_s_BLEND_RGB_ADD); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_n_s_BLEND_RGB_ADD); __Pyx_INCREF(__pyx_n_s_HWACCEL); __Pyx_GIVEREF(__pyx_n_s_HWACCEL); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_n_s_HWACCEL); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_Rect); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Rect, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_BLEND_RGB_ADD); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_BLEND_RGB_ADD, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_HWACCEL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HWACCEL, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":27 * from pygame.math import Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL # <<<<<<<<<<<<<< * from pygame.transform import rotate, scale, smoothscale * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha */ __pyx_t_4 = PyList_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_Surface); __Pyx_GIVEREF(__pyx_n_s_Surface); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_Surface); __Pyx_INCREF(__pyx_n_s_SRCALPHA); __Pyx_GIVEREF(__pyx_n_s_SRCALPHA); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_SRCALPHA); __Pyx_INCREF(__pyx_n_s_mask); __Pyx_GIVEREF(__pyx_n_s_mask); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_mask); __Pyx_INCREF(__pyx_n_s_RLEACCEL); __Pyx_GIVEREF(__pyx_n_s_RLEACCEL); PyList_SET_ITEM(__pyx_t_4, 3, __pyx_n_s_RLEACCEL); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_Surface); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Surface, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_SRCALPHA); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_SRCALPHA, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_mask); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_mask, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_RLEACCEL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_RLEACCEL, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":28 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale # <<<<<<<<<<<<<< * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha * */ __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_rotate); __Pyx_GIVEREF(__pyx_n_s_rotate); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_rotate); __Pyx_INCREF(__pyx_n_s_scale); __Pyx_GIVEREF(__pyx_n_s_scale); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_n_s_scale); __Pyx_INCREF(__pyx_n_s_smoothscale); __Pyx_GIVEREF(__pyx_n_s_smoothscale); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_n_s_smoothscale); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame_transform, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_rotate); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_rotate, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_scale); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_scale, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_smoothscale); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_smoothscale, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":29 * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha # <<<<<<<<<<<<<< * * except ImportError: */ __pyx_t_4 = PyList_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_array3d); __Pyx_GIVEREF(__pyx_n_s_array3d); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_array3d); __Pyx_INCREF(__pyx_n_s_pixels3d); __Pyx_GIVEREF(__pyx_n_s_pixels3d); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_pixels3d); __Pyx_INCREF(__pyx_n_s_array_alpha); __Pyx_GIVEREF(__pyx_n_s_array_alpha); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_array_alpha); __Pyx_INCREF(__pyx_n_s_pixels_alpha); __Pyx_GIVEREF(__pyx_n_s_pixels_alpha); PyList_SET_ITEM(__pyx_t_4, 3, __pyx_n_s_pixels_alpha); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame_surfarray, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_array3d); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) 
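/*
 * NOTE: the __Pyx_GOTREF / __Pyx_GIVEREF calls that dominate this section
 * are reference-count bookkeeping for Cython's refnanny debugging aid; they
 * expand to nothing unless the file is compiled with CYTHON_REFNANNY, in
 * which case they report to the Cython.Runtime.refnanny module imported in
 * the RefNanny setup near the top of this init function. Only the real
 * Py_INCREF/Py_DECREF traffic remains in a release build.
 */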
__Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array3d, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_pixels3d); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pixels3d, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_array_alpha); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array_alpha, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_pixels_alpha); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pixels_alpha, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L13_try_end; __pyx_L8_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":31 * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha * * except ImportError: # <<<<<<<<<<<<<< * raise ImportError("\n<Pygame> library is missing on your system." * "\nTry: \n C:\\pip install pygame on a window command prompt.") */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError); if (__pyx_t_6) { __Pyx_AddTraceback("Surface_tools", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(0, 31, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_7); /* "Surface_tools.pyx":32 * * except ImportError: * raise ImportError("\n<Pygame> library is missing on your system." 
# <<<<<<<<<<<<<< * "\nTry: \n C:\\pip install pygame on a window command prompt.") * */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 32, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 32, __pyx_L10_except_error) } goto __pyx_L10_except_error; __pyx_L10_except_error:; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_1); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); goto __pyx_L1_error; __pyx_L13_try_end:; } /* "Surface_tools.pyx":1 * # encoding: utf-8 # <<<<<<<<<<<<<< * ###cython: boundscheck=False, wraparound=False, nonecheck=False, optimize.use_switch=True * */ __pyx_t_7 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_7) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_7)) 
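/*
 * NOTE: `generic`, `strided`, `indirect`, `contiguous` and
 * `indirect_contiguous` are five singleton Enum instances created here to
 * serve as layout-describing sentinels for memoryview slicing; they are
 * only ever compared by identity. The PyThread_type_lock array filled in a
 * little further down preallocates eight locks (THREAD_LOCKS_PREALLOCATED)
 * so that, roughly, the first handful of live memoryviews can share the
 * pool instead of each allocating its own lock.
 */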
__PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":314 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":315 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_9[0] = PyThread_allocate_lock(); __pyx_t_9[1] = PyThread_allocate_lock(); __pyx_t_9[2] = PyThread_allocate_lock(); __pyx_t_9[3] = PyThread_allocate_lock(); __pyx_t_9[4] = PyThread_allocate_lock(); __pyx_t_9[5] = PyThread_allocate_lock(); __pyx_t_9[6] = PyThread_allocate_lock(); __pyx_t_9[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_9, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":537 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 537, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 537, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":983 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_t_7 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_7) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple 
__pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init Surface_tools", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init Surface_tools"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_subscript(obj, py_slice); #else result = 
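/*
 * NOTE on __Pyx_PyObject_GetSlice: on Python 2 it first tries the sq_slice
 * slot (fixing up negative bounds itself via sq_length); otherwise it
 * builds a real slice object and goes through mp_subscript, and when
 * CYTHON_USE_TYPE_SLOTS is off it degrades to the plain call in this
 * branch. The fallback is essentially:
 *
 *     PyObject *key = PySlice_New(py_start, py_stop, Py_None);
 *     result = PyObject_GetItem(obj, key);   // i.e. obj[start:stop]
 */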
PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if PY_VERSION_HEX >= 0x030700A2 *type = tstate->exc_state.exc_type; *value = tstate->exc_state.exc_value; *tb = tstate->exc_state.exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = type; tstate->exc_state.exc_value = value; tstate->exc_state.exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, 
err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = local_type; tstate->exc_state.exc_value = local_value; tstate->exc_state.exc_traceback = local_tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = 
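/*
 * NOTE: the fetch/restore helpers here move the thread state's pending
 * exception triple (type, value, traceback) in and out without going
 * through PyErr_Fetch/PyErr_Restore; ownership of the references is
 * transferred directly, and the caller already holds `tstate`, saving a
 * thread-state lookup on the hot path.
 */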
tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; 
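/*
 * NOTE: at this point PyErr_SetObject has already made the exception
 * pending; the lines below attach the explicit traceback argument of a
 * three-argument raise by swapping it straight into
 * tstate->curexc_traceback on CPython, while PyPy (branch above) has to
 * use the PyErr_Fetch/PyErr_Restore dance because those fields are not
 * accessible there.
 */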
PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
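/*
 * NOTE: __Pyx_PyFunction_FastCallDict below is the fast path for calling a
 * plain Python function: when the code object is simple
 * (CO_OPTIMIZED|CO_NEWLOCALS|CO_NOFREE, no keyword-only parameters, no
 * keyword arguments) it builds a frame directly, copies the arguments into
 * f_localsplus and runs PyEval_EvalFrameEx, skipping argument-tuple
 * packing; a call with no arguments where every parameter has a default
 * reuses the defaults tuple as the argument array; anything fancier falls
 * through to PyEval_EvalCodeEx.
 */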
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int 
i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) { Py_RETURN_TRUE; } 
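/*
 * NOTE: __Pyx_PyInt_EqObjC here compares a Python int against a C long
 * constant without a generic rich comparison where it can: an exact PyInt
 * (Python 2) is read as a C long directly, and an exact PyLong is decoded
 * by hand from up to four ob_digit limbs, shifting each limb in by
 * PyLong_SHIFT bits; larger values either fall back to tp_richcompare or
 * are known unequal outright, depending on the digit width.
 */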
else { Py_RETURN_FALSE; } } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ); #else default: Py_RETURN_FALSE; #endif } } if (a == b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } return PyObject_RichCompare(op1, op2, Py_EQ); } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
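/* Illustrative note (editorial): the ternary below merely pluralises the
 * error message.  A hypothetical call
 *
 *     __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, 1);
 *
 * takes the num_found < num_min branch and raises:
 *
 *     TypeError: f() takes at least 2 positional arguments (1 given)
 */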
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
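/* Illustrative note (editorial): `wraparound` reproduces Python's
 * negative-index semantics and `boundscheck` gates the range test; both
 * are fixed at compile time by the Cython directives of the same names.
 * Example: i == -1 on a 3-tuple wraps to n == 2, and PyTuple_GET_ITEM
 * returns the last element with a fresh Py_INCREF.  With boundscheck
 * disabled, an out-of-range n reads past the object, which is exactly
 * the unsafe trade the directive documents. */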
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* SetItemInt */ static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; PyErr_Clear(); } } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { #else if (is_list || PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
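/* Illustrative note (editorial): the exact-bytes path is a cheapness
 * ladder: pointer identity, then length, then the first byte, then the
 * cached hashes (ob_shash, when CYTHON_USE_UNICODE_INTERNALS is on), and
 * only then memcmp.  Example: b"abc" vs b"abd" passes the length and
 * first-byte tests, the cached hashes are unset (-1) on fresh objects,
 * so memcmp decides: it differs at the last byte, and the fold below
 * answers false for Py_EQ and true for Py_NE. */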
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
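/* Illustrative note (editorial): for PEP 393 strings the code first
 * compares the "kind" (1-, 2- or 4-byte code units per character); equal
 * strings always canonicalise to the same kind, so differing kinds mean
 * not-equal without touching the payload.  Matching kinds are then
 * compared with one memcmp over length * kind bytes, whose result the
 * ternary below folds into the requested Py_EQ/Py_NE answer.  Under
 * Python 2, str/unicode mixes are first coerced via PyUnicode_FromObject
 * in the branch above. */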
(result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}

/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t q = a / b;
    Py_ssize_t r = a - q*b;
    q -= ((r != 0) & ((r ^ b) < 0));
    return q;
}

/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}

/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        size_t slen = strlen(cstring);
        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
            PyErr_SetString(PyExc_OverflowError,
                            "c-string too long to convert to Python");
            return NULL;
        }
        length = (Py_ssize_t) slen;
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}

/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    return (likely(r)) ?
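/* Illustrative note (editorial): __Pyx_GetAttr3 implements the
 * three-argument getattr(o, n, d): an AttributeError from the lookup is
 * swallowed by __Pyx_GetAttr3Default and the default is returned.  Also
 * worth a worked example is __Pyx_div_Py_ssize_t above (and
 * __Pyx_div_long further down), which turns C's truncate-toward-zero
 * division into Python floor division; for a = -7, b = 2:
 *
 *     q = -7 / 2;       // -3 in C (truncated)
 *     r = -7 - q * 2;   // -1; r != 0 and (r ^ b) < 0
 *     q -= 1;           // -4, matching Python's -7 // 2
 */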
r : __Pyx_GetAttr3Default(d); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = *type; tstate->exc_state.exc_value = *value; tstate->exc_state.exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const 
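/* Illustrative note (editorial): the Python 2 fast path above detects
 * signed overflow without invoking undefined behaviour: the sum is
 * formed in unsigned arithmetic, then overflow is recognised by the sign
 * test ((x^a) >= 0 || (x^b) >= 0) -- a two's-complement add overflowed
 * iff the result differs in sign from *both* operands.  E.g. with
 * a == LONG_MAX, b == 1 the wrapped x == LONG_MIN makes x^a and x^b both
 * negative, so the boxed nb_add is taken instead of returning a wrapped
 * value. */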
digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
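/* Illustrative note (editorial): the closing ternary below dispatches on
 * the `inplace` flag that Cython sets for `x += 1` sites as opposed to
 * `x + 1`, choosing PyNumber_InPlaceAdd versus PyNumber_Add.  The digit
 * cases above only fire when the reconstructed value provably fits a
 * long (or PY_LONG_LONG via the long_long label), so `x = a + b` is safe
 * for the modest compile-time literals Cython inlines as `intval`; any
 * wider operand is handed to PyLong_Type.tp_as_number->nb_add, i.e.
 * plain Python semantics. */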
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if 
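/* Illustrative note (editorial): __Pyx_setup_reduce makes cdef classes
 * picklable.  Unless the type defines __getstate__, it checks whether
 * __reduce_ex__/__reduce__ are still the ones inherited from object and,
 * if so, installs the generated __reduce_cython__ under the name
 * __reduce__ (and __setstate_cython__ under __setstate__) directly in
 * tp_dict, removes the helper names, and calls PyType_Modified() so that
 * cached attribute lookups are refreshed. */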
(!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
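/* Illustrative note (editorial): C-line decoration of tracebacks is
 * switchable at runtime.  The module consults
 * cython_runtime.cline_in_traceback; any falsy value forces c_line to 0
 * below, which suppresses the "(file.c:NNN)" markers.  From Python this
 * can typically be toggled with (assuming a Cython module has already
 * created the cython_runtime module):
 *
 *     import cython_runtime
 *     cython_runtime.cline_in_traceback = False
 *
 * A failing PyObject_Not() (-1) is conservatively treated as "do not use
 * C lines". */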
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = 
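/* Illustrative note (editorial): when a C line is available, the
 * synthesised function name embeds the C location, so tracebacks read
 * like "my_func (my_module.c:1234)" (names here are hypothetical).  The
 * cache above stores such entries under the negated C line (-c_line),
 * keeping them disjoint from ordinary Python line keys; see
 * __Pyx_AddTraceback below. */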
PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const 
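/* Illustrative note (editorial): __pyx_slices_overlap above reduces
 * overlap detection to byte intervals: for each slice it computes the
 * lowest and highest addressed byte (negative strides extend the start
 * downwards), then tests the classic interval condition
 * (start1 < end2) && (start2 < end1).  Worked example for a reversed
 * 1-D slice with data == p, stride == -8, shape == {4}, itemsize == 8:
 * start == p - 24 and end == p + 8. */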
char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value) { const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned char) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned char) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(unsigned char) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned char), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_unsigned_char(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_unsigned_char(*(unsigned char *) itemp); } static CYTHON_INLINE int __pyx_memview_set_unsigned_char(const char *itemp, PyObject *obj) { unsigned char value = __Pyx_PyInt_As_unsigned_char(obj); if ((value == (unsigned char)-1) && PyErr_Occurred()) return 0; *(unsigned char *) itemp = value; return 1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) 
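/* Illustrative note (editorial): the CIntToPy/CIntFromPy helpers are one
 * template stamped out per C type, which is why tautologies such as the
 * sizeof(long) < sizeof(long) test here survive into the source.  They
 * cost nothing: the comparisons are integer constant expressions, so the
 * compiler folds them and discards dead branches, leaving e.g.
 * PyInt_FromLong for long and PyLong_FromUnsignedLongLong only where the
 * type is genuinely wider. */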
< sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * 
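/* Illustrative note (editorial): these small-int fast paths decode up to
 * four ob_digit entries inline, and __PYX_VERIFY_RETURN_INT re-checks
 * that the decoded value survives the round trip through the target
 * type.  Worked example with PyLong_SHIFT == 15: the Python int 70000 is
 * stored as digits == {4464, 2}, and (2 << 15) | 4464 == 70000.  Longer
 * digit strings fall through to PyLong_AsUnsignedLong() or, as a last
 * resort, _PyLong_AsByteArray(). */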
PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) 
- 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned char __Pyx_PyInt_As_unsigned_char(PyObject *x) { const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned char) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned char, digit, digits[0]) case 2: if (8 * sizeof(unsigned char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 2 * PyLong_SHIFT) { return (unsigned char) (((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; case 3: if (8 * 
sizeof(unsigned char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 3 * PyLong_SHIFT) { return (unsigned char) (((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; case 4: if (8 * sizeof(unsigned char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 4 * PyLong_SHIFT) { return (unsigned char) (((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned char) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned char, digit, +digits[0]) case -2: if (8 * sizeof(unsigned char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { return (unsigned char) ((((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned 
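/* Illustrative note (editorial): __PYX_VERIFY_RETURN_INT (defined above)
 * guards every narrowing conversion: it casts the decoded value to the
 * target type and back, and any mismatch branches to raise_overflow /
 * raise_neg_overflow.  E.g. converting the Python int 300 to unsigned
 * char: (unsigned char)300 == 44 != 300, so OverflowError("value too
 * large to convert to unsigned char") is raised instead of silently
 * truncating. */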
char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { return (unsigned char) ((((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT) { return (unsigned char) ((((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; } #endif if (sizeof(unsigned char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned char) -1; } } else { unsigned char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned char) -1; val = __Pyx_PyInt_As_unsigned_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned char"); return (unsigned char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned char"); return (unsigned char) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, 
digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, 
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* 
MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i > -1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if
(new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 3, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 3, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char 
message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
GB_binop__max_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__max_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__max_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__max_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp32) // A*D function (colscale): GB (_AxD__max_fp32) // D*A function (rowscale): GB (_DxB__max_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__max_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__max_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp32) // C=scalar+B GB (_bind1st__max_fp32) // C=scalar+B' GB (_bind1st_tran__max_fp32) // C=A+scalar GB (_bind2nd__max_fp32) // C=A'+scalar GB (_bind2nd_tran__max_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = fmaxf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = fmaxf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__max_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = fmaxf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = fmaxf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmaxf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmaxf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
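/*
  [Editor's note] A minimal, hypothetical usage sketch, not part of the
  generated file above: in SuiteSparse:GraphBLAS, a user-level eWiseMult with
  the built-in GrB_MAX_FP32 operator is what ultimately dispatches to the
  GB (_AemultB_*__max_fp32) kernels defined in this file, with the variant
  chosen by the sparsity formats of C, M, A, and B. The matrix names and
  sizes below are illustrative assumptions.
*/
#include <GraphBLAS.h>

int emult_max_fp32_demo (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_FP32, 4, 4) ;
    GrB_Matrix_new (&B, GrB_FP32, 4, 4) ;
    GrB_Matrix_new (&C, GrB_FP32, 4, 4) ;
    GrB_Matrix_setElement_FP32 (A, 1.5f, 0, 0) ;
    GrB_Matrix_setElement_FP32 (B, 2.5f, 0, 0) ;
    // C = A .* B on the pattern intersection, combining values with max:
    // the same cij = fmaxf (aij, bij) operation hard-coded above
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_MAX_FP32, A, B, NULL) ;
    float cij = 0 ;
    GrB_Matrix_extractElement_FP32 (&cij, C, 0, 0) ;    // cij is 2.5
    GrB_Matrix_free (&A) ; GrB_Matrix_free (&B) ; GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (cij == 2.5f) ;
}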
BPMaximalMatching.h
#ifndef BP_MAXIMAL_MATCHING_H #define BP_MAXIMAL_MATCHING_H #include "CombBLAS/CombBLAS.h" #include <iostream> #include <functional> #include <algorithm> #include <vector> #include <limits> #include "Utility.h" #include "MatchingDefs.h" #define NO_INIT 0 #define GREEDY 1 #define KARP_SIPSER 2 #define DMD 3 MTRand GlobalMT(123); // for reproducible results namespace combblas { // This is not tested with CSC yet // TODO: test with CSC and setting SPA (similar to Weighted Greedy) template <typename Par_DCSC_Bool, typename IT> void MaximalMatching(Par_DCSC_Bool & A, Par_DCSC_Bool & AT, FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, FullyDistVec<IT, IT>& degColRecv, int type, bool rand=true) { typedef VertexTypeML < IT, IT> VertexType; int nprocs, myrank; MPI_Comm_size(MPI_COMM_WORLD,&nprocs); MPI_Comm_rank(MPI_COMM_WORLD,&myrank); int nthreads = 1; #ifdef _OPENMP #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif FullyDistVec<IT, IT> degCol = degColRecv; //unmatched row and column vertices FullyDistSpVec<IT, IT> unmatchedRow(mateRow2Col, [](IT mate){return mate==-1;}); FullyDistSpVec<IT, IT> degColSG(A.getcommgrid(), A.getncol()); //FullyDistVec<IT, IT> degCol(A.getcommgrid()); //A.Reduce(degCol, Column, plus<IT>(), static_cast<IT>(0)); // Reduce is not multithreaded FullyDistSpVec<IT, VertexType> unmatchedCol(A.getcommgrid(), A.getncol()); // every vertex is unmatched; keep non-isolated vertices unmatchedCol = EWiseApply<VertexType>(unmatchedCol, degCol, [](VertexType vtx, IT deg){return VertexType();}, [](VertexType vtx, IT deg){return deg>0;}, true, VertexType()); FullyDistSpVec<IT, VertexType> fringeRow(A.getcommgrid(), A.getnrow()); FullyDistSpVec<IT, IT> fringeRow2(A.getcommgrid(), A.getnrow()); FullyDistSpVec<IT, VertexType> deg1Col(A.getcommgrid(), A.getncol()); IT curUnmatchedCol = unmatchedCol.getnnz(); IT curUnmatchedRow = unmatchedRow.getnnz(); IT newlyMatched = 1; // ensure the first pass of the while loop int iteration = 0; double tStart = MPI_Wtime(); std::vector<std::vector<double> > timing; #ifdef DETAIL_STATS if(myrank == 0) { cout << "=======================================================\n"; cout << "@@@@@@ Number of processes: " << nprocs << endl; cout << "=======================================================\n"; cout << "It | UMRow | UMCol | newlyMatched | Time "<< endl; cout << "=======================================================\n"; } #endif MPI_Barrier(MPI_COMM_WORLD); while(curUnmatchedCol !=0 && curUnmatchedRow!=0 && newlyMatched != 0 ) { unmatchedCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx);}); if(type==DMD) { unmatchedCol = EWiseApply<VertexType>(unmatchedCol, degCol, [](VertexType vtx, IT deg){return VertexType(vtx.parent,deg);}, [](VertexType vtx, IT deg){return true;}, false, VertexType()); } else if(rand) { unmatchedCol.Apply([](VertexType vtx){return VertexType(vtx.parent, static_cast<IT>((GlobalMT.rand() * 9999999)+1));}); } // ======================== step1: One step of BFS ========================= std::vector<double> times; double t1 = MPI_Wtime(); if(type==GREEDY) { SpMV<Select2ndMinSR<bool, VertexType>>(A, unmatchedCol, fringeRow, false); } else if(type==DMD) { SpMV<Select2ndMinSR<bool, VertexType>>(A, unmatchedCol, fringeRow, false); } else //(type==KARP_SIPSER) { deg1Col = EWiseApply<VertexType>(unmatchedCol, degCol, [](VertexType vtx, IT deg){return vtx;}, [](VertexType vtx, IT deg){return deg==1;}, false, VertexType()); if(deg1Col.getnnz()>9) SpMV<Select2ndMinSR<bool, VertexType>>(A,
deg1Col, fringeRow, false); else SpMV<Select2ndMinSR<bool, VertexType>>(A, unmatchedCol, fringeRow, false); } // Remove matched row vertices fringeRow = EWiseApply<VertexType>(fringeRow, mateRow2Col, [](VertexType vtx, IT mate){return vtx;}, [](VertexType vtx, IT mate){return mate==-1;}, false, VertexType()); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // ======================== step2: Update matching ========================= fringeRow2 = EWiseApply<IT>(fringeRow, mateRow2Col, [](VertexType vtx, IT mate){return vtx.parent;}, [](VertexType vtx, IT mate){return true;}, false, VertexType()); FullyDistSpVec<IT, IT> newMatchedCols = fringeRow2.Invert(A.getncol()); FullyDistSpVec<IT, IT> newMatchedRows = newMatchedCols.Invert(A.getnrow()); mateCol2Row.Set(newMatchedCols); mateRow2Col.Set(newMatchedRows); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // =============== step3: Update degree of unmatched columns ================= unmatchedRow.Select(mateRow2Col, [](IT mate){return mate==-1;}); unmatchedCol.Select(mateCol2Row, [](IT mate){return mate==-1;}); if(type!=GREEDY) { // update degree newMatchedRows.Apply([](IT val){return 1;}); // needed if the matrix is Boolean since the SR::multiply isn't called SpMV< SelectPlusSR<bool, IT>>(AT, newMatchedRows, degColSG, false); // degree of column vertices to matched rows // subtract degree of column vertices degCol.EWiseApply(degColSG, [](IT old_deg, IT new_deg){return old_deg-new_deg;}, [](IT old_deg, IT new_deg){return true;}, false, static_cast<IT>(0)); // remove isolated vertices unmatchedCol = EWiseApply<VertexType>(unmatchedCol, degCol, [](VertexType vtx, IT deg){return vtx;}, [](VertexType vtx, IT deg){return deg>0;}, false, VertexType()); } if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== ++iteration; newlyMatched = newMatchedCols.getnnz(); times.push_back(std::accumulate(times.begin(), times.end(), 0.0)); timing.push_back(times); #ifdef DETAIL_STATS if(myrank == 0) { printf("%3d %10lld %10lld %10lld %18lf\n", iteration , curUnmatchedRow, curUnmatchedCol, newlyMatched, times.back()); } #endif curUnmatchedCol = unmatchedCol.getnnz(); curUnmatchedRow = unmatchedRow.getnnz(); MPI_Barrier(MPI_COMM_WORLD); } IT cardinality = mateRow2Col.Count([](IT mate){return mate!=-1;}); std::vector<double> totalTimes(timing[0].size(),0); for(int i=0; i<timing.size(); i++) { for(int j=0; j<timing[i].size(); j++) { totalTimes[j] += timing[i][j]; } } if(myrank == 0) { #ifdef DETAIL_STATS cout << "==========================================================\n"; cout << "\n================individual timings =======================\n"; cout << " SpMV Update-Match Update-UMC Total "<< endl; cout << "==========================================================\n"; for(int i=0; i<timing.size(); i++) { for(int j=0; j<timing[i].size(); j++) { printf("%12.5lf ", timing[i][j]); } cout << endl; } cout << "-------------------------------------------------------\n"; for(int i=0; i<totalTimes.size(); i++) printf("%12.5lf ", totalTimes[i]); cout << endl; #endif std::cout << "****** maximal matching runtime ********\n"; std::cout << "nprocesses nthreads ncores algorithm Unmatched-Rows Cardinality Total Time***\n"; std::cout << nprocs << " " << nthreads << " " << nprocs * nthreads << " 
"; if(type == DMD) std::cout << "DMD"; else if(type == GREEDY) std::cout << "Greedy"; else if(type == KARP_SIPSER) std::cout << "Karp-Sipser"; if(rand && (type == KARP_SIPSER || type == GREEDY) ) std::cout << "-rand"; std::cout << " "; printf("%lld %lld %lf\n", curUnmatchedRow, cardinality, totalTimes.back()); std::cout << "-------------------------------------------------------\n\n"; } //isMatching(mateCol2Row, mateRow2Col); } // Special version of the greedy algorithm (works for both CSC (multithreaded) and DCSC) // That uses WeightMaxSR semiring // TODO: check if this is 1/2 approx of the weighted matching (probably no) // TODO: should we remove degCol? // TODO: can be merged with the generalized MaximalMatching template <typename Par_MAT_Double, typename IT> void WeightedGreedy(Par_MAT_Double & A, FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, FullyDistVec<IT, IT>& degCol) { typedef VertexTypeML < IT, double> VertexType; int nthreads=1; #ifdef THREADED #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif PreAllocatedSPA<VertexType> SPA(A.seq(), nthreads*4); int nprocs, myrank; MPI_Comm_size(MPI_COMM_WORLD,&nprocs); MPI_Comm_rank(MPI_COMM_WORLD,&myrank); //unmatched row and column vertices FullyDistSpVec<IT, IT> unmatchedRow(mateRow2Col, [](IT mate){return mate==-1;}); FullyDistSpVec<IT, VertexType> unmatchedCol(A.getcommgrid(), A.getncol()); // every veretx is unmatched. keep non-isolated vertices unmatchedCol = EWiseApply<VertexType>(unmatchedCol, degCol, [](VertexType vtx, IT deg){return VertexType();}, [](VertexType vtx, IT deg){return deg>0;}, true, VertexType()); FullyDistSpVec<IT, VertexType> fringeRow(A.getcommgrid(), A.getnrow()); FullyDistSpVec<IT, IT> fringeRow2(A.getcommgrid(), A.getnrow()); FullyDistSpVec<IT, VertexType> fringeRow3(A.getcommgrid(), A.getnrow()); IT curUnmatchedCol = unmatchedCol.getnnz(); IT curUnmatchedRow = unmatchedRow.getnnz(); IT newlyMatched = 1; // ensure the first pass of the while loop int iteration = 0; std::vector<std::vector<double> > timing; #ifdef DETAIL_STATS if(myrank == 0) { cout << "=======================================================\n"; cout << "@@@@@@ Number of processes: " << nprocs << endl; cout << "=======================================================\n"; cout << "It | UMRow | UMCol | newlyMatched | Time "<< endl; cout << "=======================================================\n"; } #endif MPI_Barrier(MPI_COMM_WORLD); while(curUnmatchedCol !=0 && curUnmatchedRow!=0 && newlyMatched != 0 ) { // anything is fine in the second argument unmatchedCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx);}); // ======================== step1: One step of BFS ========================= std::vector<double> times; double t1 = MPI_Wtime(); SpMV<WeightMaxMLSR<double, VertexType>>(A, unmatchedCol, fringeRow, false, SPA); // Remove matched row vertices fringeRow = EWiseApply<VertexType>(fringeRow, mateRow2Col, [](VertexType vtx, IT mate){return vtx;}, [](VertexType vtx, IT mate){return mate==-1;}, false, VertexType()); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // ======================== step2: Update matching ========================= fringeRow2 = EWiseApply<IT>(fringeRow, mateRow2Col, [](VertexType vtx, IT mate){return vtx.parent;}, [](VertexType vtx, IT mate){return true;}, false, VertexType()); FullyDistSpVec<IT, IT> newMatchedCols = fringeRow2.Invert(A.getncol()); FullyDistSpVec<IT, IT> 
newMatchedRows = newMatchedCols.Invert(A.getnrow()); mateCol2Row.Set(newMatchedCols); mateRow2Col.Set(newMatchedRows); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // =============== step3: Update unmatched columns and rows ================= unmatchedRow.Select(mateRow2Col, [](IT mate){return mate==-1;}); unmatchedCol.Select(mateCol2Row, [](IT mate){return mate==-1;}); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== ++iteration; newlyMatched = newMatchedCols.getnnz(); times.push_back(std::accumulate(times.begin(), times.end(), 0.0)); timing.push_back(times); #ifdef DETAIL_STATS if(myrank == 0) { printf("%3d %10lld %10lld %10lld %18lf\n", iteration , curUnmatchedRow, curUnmatchedCol, newlyMatched, times.back()); } #endif curUnmatchedCol = unmatchedCol.getnnz(); curUnmatchedRow = unmatchedRow.getnnz(); MPI_Barrier(MPI_COMM_WORLD); } IT cardinality = mateRow2Col.Count([](IT mate){return mate!=-1;}); std::vector<double> totalTimes(timing[0].size(),0); for(int i=0; i<timing.size(); i++) { for(int j=0; j<timing[i].size(); j++) { totalTimes[j] += timing[i][j]; } } if(myrank == 0) { #ifdef DETAIL_STATS cout << "==========================================================\n"; cout << "\n================individual timings =======================\n"; cout << " SpMV Update-Match Update-UMC Total "<< endl; cout << "==========================================================\n"; for(int i=0; i<timing.size(); i++) { for(int j=0; j<timing[i].size(); j++) { printf("%12.5lf ", timing[i][j]); } cout << endl; } cout << "-------------------------------------------------------\n"; for(int i=0; i<totalTimes.size(); i++) printf("%12.5lf ", totalTimes[i]); cout << endl; #endif #ifdef TIMING std::cout << "****** maximal matching runtime ********\n"; std::cout << "Unmatched-Rows Cardinality Total Time***\n"; printf("%lld %lld %lf\n", curUnmatchedRow, cardinality, totalTimes.back()); std::cout << "-------------------------------------------------------\n\n"; #endif } //isMatching(mateCol2Row, mateRow2Col); } template <class Par_DCSC_Bool, class IT, class NT> bool isMaximalmatching(Par_DCSC_Bool & A, FullyDistVec<IT,NT> & mateRow2Col, FullyDistVec<IT,NT> & mateCol2Row) { typedef VertexTypeML < IT, IT> VertexType; int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); FullyDistSpVec<IT, IT> fringeRow(A.getcommgrid(), A.getnrow()); FullyDistSpVec<IT, IT> fringeCol(A.getcommgrid(), A.getncol()); FullyDistSpVec<IT, IT> unmatchedRow(mateRow2Col, [](IT mate){return mate==-1;}); FullyDistSpVec<IT, IT> unmatchedCol(mateCol2Row, [](IT mate){return mate==-1;}); unmatchedRow.setNumToInd(); unmatchedCol.setNumToInd(); SpMV<Select2ndMinSR<bool, VertexType>>(A, unmatchedCol, fringeRow, false); fringeRow = EWiseMult(fringeRow, mateRow2Col, true, (IT) -1); if(fringeRow.getnnz() != 0) { if(myrank == 0) std::cout << "Not maximal matching!!\n"; return false; } Par_DCSC_Bool tA = A; tA.Transpose(); SpMV<Select2ndMinSR<bool, VertexType>>(tA, unmatchedRow, fringeCol, false); fringeCol = EWiseMult(fringeCol, mateCol2Row, true, (IT) -1); if(fringeCol.getnnz() != 0) { if(myrank == 0) std::cout << "Not maximal matching**!!\n"; return false; } return true; } } #endif
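/*
  [Editor's note] A minimal driver sketch for the routines above. It is a
  hypothetical example, not part of the header: the command-line input file
  and the int64_t/bool matrix types are assumptions, and the degree
  computation mirrors the commented-out A.Reduce(...) call inside
  MaximalMatching.
*/
#include <mpi.h>
#include "CombBLAS/CombBLAS.h"
#include "BPMaximalMatching.h"

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    if (argc < 2) { MPI_Finalize(); return 1; }
    {
        typedef int64_t IT;
        typedef combblas::SpParMat<IT, bool, combblas::SpDCCols<IT, bool> > Par_DCSC_Bool;

        std::shared_ptr<combblas::CommGrid> fullWorld;
        fullWorld.reset(new combblas::CommGrid(MPI_COMM_WORLD, 0, 0));
        Par_DCSC_Bool A(fullWorld);
        A.ParallelReadMM(std::string(argv[1]), true, combblas::maximum<bool>()); // MatrixMarket input
        Par_DCSC_Bool AT = A;
        AT.Transpose();    // same pattern as in isMaximalmatching() above

        // mate vectors start at -1 (unmatched); column degrees are precomputed
        // once, since Reduce is not multithreaded (see the comment above)
        combblas::FullyDistVec<IT, IT> mateRow2Col(A.getcommgrid(), A.getnrow(), (IT) -1);
        combblas::FullyDistVec<IT, IT> mateCol2Row(A.getcommgrid(), A.getncol(), (IT) -1);
        combblas::FullyDistVec<IT, IT> degCol(A.getcommgrid());
        A.Reduce(degCol, combblas::Column, std::plus<IT>(), static_cast<IT>(0));

        combblas::MaximalMatching(A, AT, mateRow2Col, mateCol2Row, degCol, KARP_SIPSER, true);
        combblas::isMaximalmatching(A, mateRow2Col, mateCol2Row);   // sanity check
    }
    MPI_Finalize();
    return 0;
}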
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 524288UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait == UndefinedPixelTrait) && (draw_info->fill.alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. */ floodplane_image=CloneImage(image,0,0,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. */ status=MagickTrue; start=0; s=segment_stack; PushSegmentStack(y_offset,x_offset,x_offset,1); PushSegmentStack(y_offset+1,x_offset,x_offset,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Tile fill color onto floodplane. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelViaPixelInfo(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. 
% % Note, the interface of this method will change in the future to support % more than one transition. % % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const StopInfo *stops, % const size_t number_stops,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o method: the gradient spread method: pad, reflect, or repeat. % % o stops: the gradient stops. % % o number_stops: the number of gradient stops. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method,const StopInfo *stops, const size_t number_stops,ExceptionInfo *exception) { const char *artifact; DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(stops != (const StopInfo *) NULL); assert(number_stops > 0); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; artifact=GetImageArtifact(image,"gradient:bounding-box"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box); gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; artifact=GetImageArtifact(image,"gradient:direction"); if (artifact != (const char *) NULL) { GravityType direction; direction=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,artifact); switch (direction) { case NorthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case WestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case EastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case SouthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; break; } default: break; } }
artifact=GetImageArtifact(image,"gradient:angle"); if (artifact != (const char *) NULL) gradient->angle=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"gradient:vector"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf", &gradient->gradient_vector.x1,&gradient->gradient_vector.y1, &gradient->gradient_vector.x2,&gradient->gradient_vector.y2); if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) && (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) && (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) && (GetImageArtifact(image,"gradient:vector") == (const char *) NULL)) if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; artifact=GetImageArtifact(image,"gradient:center"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x, &gradient->center.y); artifact=GetImageArtifact(image,"gradient:angle"); if ((type == LinearGradient) && (artifact != (const char *) NULL)) { double sine, cosine, distance; /* Reference https://drafts.csswg.org/css-images-3/#linear-gradients. */ sine=sin((double) DegreesToRadians(gradient->angle-90.0)); cosine=cos((double) DegreesToRadians(gradient->angle-90.0)); distance=fabs((double) (image->columns-1.0)*cosine)+ fabs((double) (image->rows-1.0)*sine); gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine); gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine); gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine); gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine); } gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/ 2.0; gradient->radii.y=gradient->radii.x; artifact=GetImageArtifact(image,"gradient:extent"); if (artifact != (const char *) NULL) { if (LocaleCompare(artifact,"Circle") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Diagonal") == 0) { gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)* (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Ellipse") == 0) { gradient->radii.x=(double) (image->columns-1.0)/2.0; gradient->radii.y=(double) (image->rows-1.0)/2.0; } if (LocaleCompare(artifact,"Maximum") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Minimum") == 0) { gradient->radii.x=(double) (MagickMin((image->columns-1.0), (image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } } artifact=GetImageArtifact(image,"gradient:radii"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x, &gradient->radii.y); gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y); gradient->spread=method; /* Define the gradient to fill between the stops. 
*/ gradient->number_stops=number_stops; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memcpy(gradient->stops,stops,(size_t) number_stops* sizeof(*stops)); /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) paint_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. */ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color.
*/ k=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel = GetPixelChannelChannel(linear_image,i); PixelTrait traits = GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if ((paint_traits & CopyPixelTrait) != 0) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill argument. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. % % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target, % const PixelInfo *fill,const MagickBooleanType invert, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert, ExceptionInfo *exception) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo conform_fill, conform_target, zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (PixelInfo *) NULL); assert(fill != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); ConformPixelInfo(image,fill,&conform_fill,exception); ConformPixelInfo(image,target,&conform_target,exception); /* Make image color opaque. */ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert) { PixelTrait traits; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(image,(Quantum) conform_fill.red,q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(image,(Quantum) conform_fill.green,q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(image,(Quantum) conform_fill.blue,q); traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlack(image,(Quantum) conform_fill.black,q); traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelAlpha(image,(Quantum) conform_fill.alpha,q); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImage) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. 
% % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const PixelInfo *target,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert, ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. */ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImage) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. % % As there is one fuzz value for all the channels, TransparentPaintImage() % is not suitable for operations like chroma, where the tolerance for % similarity of two color components (RGB) can be different. Thus we define % this method to take two target pixels (one low and one high), and all % pixels of an image lying between these two pixels are made % transparent.
% % The format of the TransparentPaintImageChroma method is: % % MagickBooleanType TransparentPaintImageChroma(Image *image, % const PixelInfo *low,const PixelInfo *high,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low: the low target color. % % o high: the high target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image, const PixelInfo *low,const PixelInfo *high,const Quantum opacity, const MagickBooleanType invert,ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(high != (PixelInfo *) NULL); assert(low != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType match; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); match=((pixel.red >= low->red) && (pixel.red <= high->red) && (pixel.green >= low->green) && (pixel.green <= high->green) && (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse; if (match != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImageChroma) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
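/*
  [Editor's note] A minimal MagickCore usage sketch for FloodfillPaintImage()
  above. It is illustrative, not part of paint.c: the file names are
  assumptions, and the fuzz setting follows the doc comment's discussion of
  tolerance (here 10% of QuantumRange).
*/
#include "MagickCore/MagickCore.h"

int main(int argc, char **argv)
{
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;
  DrawInfo *draw_info;
  PixelInfo target;

  (void) argc;
  MagickCoreGenesis(*argv, MagickTrue);
  exception = AcquireExceptionInfo();
  image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename, "input.png", MagickPathExtent);
  image = ReadImage(image_info, exception);
  if (image == (Image *) NULL)
    return 1;
  draw_info = AcquireDrawInfo();
  (void) QueryColorCompliance("red", AllCompliance, &draw_info->fill, exception);
  /* Match whatever color sits at the seed point (0,0). */
  (void) GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, 0, 0, &target,
    exception);
  image->fuzz = 0.10 * QuantumRange;   /* tolerate small color differences */
  (void) FloodfillPaintImage(image, draw_info, &target, 0, 0, MagickFalse,
    exception);
  (void) CopyMagickString(image->filename, "output.png", MagickPathExtent);
  (void) WriteImage(image_info, image, exception);
  draw_info = DestroyDrawInfo(draw_info);
  image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}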
GB_binop__pair_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fp64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: double // A type: double // A pattern? 1 // B type: double // B pattern? 1 // BinaryOp: cij = 1 #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = 
(*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
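
//------------------------------------------------------------------------------
// illustrative sketch: what the PAIR operator computes
//------------------------------------------------------------------------------

// A minimal sketch, not part of the generated file: the PAIR binary operator
// returns z = 1 for every pair of inputs, which is why the bind1st/bind2nd
// kernels above reduce to "Cx [p] = 1" with both operands ignored.  The
// function name below is hypothetical and assumes a dense output (no bitmap,
// so every entry is present):

static inline void pair_fp64_sketch (double *restrict Cx, int64_t cnz)
{
    for (int64_t p = 0 ; p < cnz ; p++)
    {
        Cx [p] = 1 ;    // z = pair (x, y) == 1, regardless of x and y
    }
}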
GB_unaryop__ainv_bool_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_uint32 // op(A') function: GB_tran__ainv_bool_uint32 // C type: bool // A type: uint32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_uint32 ( bool *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
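
//------------------------------------------------------------------------------
// illustrative sketch: the kernel above with its macros hand-expanded
//------------------------------------------------------------------------------

// A minimal sketch, not part of the generated file: one iteration of
// GB_unop__ainv_bool_uint32 once GB_CAST_OP is expanded — get aij, typecast
// it to bool, then apply the operator (per GB_OP above, AINV on bool is the
// identity).  The function name is hypothetical:

static inline void unop_ainv_bool_uint32_sketch
(
    bool *restrict Cx,              // output array, size anz
    const uint32_t *restrict Ax,    // input array, size anz
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;     // GB_GETA:    aij = Ax [pA]
        bool z = (bool) aij ;       // GB_CASTING: z = (bool) aij
        Cx [p] = z ;                // GB_OP:      Cx [pC] = z
    }
}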
convolution_1x1_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack8to4_int8_msa(bottom_im2col, top_blob, kernel, opt); } static void conv1x1s2_sgemm_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const int64_t* r0 = bottom_blob.channel(p); int64_t* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { outptr[0] = r0[0]; r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_pack8to4_int8_msa(bottom_blob_shrinked, top_blob, kernel, opt); }
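
// A minimal sketch, not part of ncnn, of the stride-2 subsampling performed
// above before the 1x1 sgemm.  After copying outw pixels with stride 2, the
// read pointer has advanced 2*outw elements, so the jump to the start of the
// next sampled row is tailstep = (w - 2*outw) + w: the unread tail of the
// current row plus one whole skipped row.  The function name is hypothetical;
// it operates on one plain channel of int64_t-packed data:
static void shrink_stride2_sketch(const int64_t* src, int w, int64_t* dst, int outw, int outh)
{
    const int tailstep = w - 2 * outw + w;

    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            *dst++ = src[0]; // keep every other pixel in the row
            src += 2;
        }
        src += tailstep; // skip the row remainder plus one full input row
    }
}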
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// The statement class. 
unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class IfStmtBitfields {
    friend class IfStmt;

    unsigned : NumStmtBits;

    unsigned IsConstexpr : 1;
  };

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 17 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
    unsigned BasePathIsEmpty : 1;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class OpaqueValueExprBitfields {
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
unsigned IsUnique : 1; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; IfStmtBitfields IfStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; OpaqueValueExprBitfields OpaqueValueExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; CoawaitExprBitfields CoawaitBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). 
struct ExprIterator : llvm::iterator_adaptor_base<ExprIterator, Stmt **, std::random_access_iterator_tag, Expr *> { ExprIterator() : iterator_adaptor_base(nullptr) {} ExprIterator(Stmt **I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<Expr **>(I); } }; /// Const iterator for iterating over Stmt * arrays that contain only Expr * struct ConstExprIterator : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *, std::random_access_iterator_tag, const Expr *const> { ConstExprIterator() : iterator_adaptor_base(nullptr) {} ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<const Expr *const *>(I); } }; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) == sizeof(void *), "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, const ASTContext *Context = nullptr) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); const Stmt *IgnoreImplicit() const { return const_cast<Stmt *>(this)->IgnoreImplicit(); } /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. 
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const LLVM_READONLY { return getBeginLoc(); } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro = false; public: friend class ASTStmtReader; friend class ASTStmtWriter; NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; SourceLocation LBraceLoc, RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // Build an empty compound statement. static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } using body_iterator = Stmt **; using body_range = llvm::iterator_range<body_iterator>; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return getTrailingObjects<Stmt *>(); } body_iterator body_end() { return body_begin() + size(); } Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; } Stmt *body_back() { return !body_empty() ? body_begin()[size() - 1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); body_begin()[size() - 1] = S; } using const_body_iterator = Stmt* const *; using body_const_range = llvm::iterator_range<const_body_iterator>; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return getTrailingObjects<Stmt *>(); } const_body_iterator body_end() const { return body_begin() + size(); } const Stmt *body_front() const { return !body_empty() ? body_begin()[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using reverse_body_iterator = std::reverse_iterator<body_iterator>; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } using const_reverse_body_iterator = std::reverse_iterator<const_body_iterator>; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(body_begin(), body_end()); } const_child_range children() const { return const_child_range(body_begin(), body_end()); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase = nullptr; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {} SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { SourceLocation EllipsisLoc; enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) {} SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. 
const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {} Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getEndLoc() const { if (const auto *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getLocEnd(); } child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public Stmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getLocEnd(); } child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. class IfStmt : public Stmt { enum { INIT, VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) {} /// Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Stmt *getInit() { return SubExprs[INIT]; } const Stmt *getInit() const { return SubExprs[INIT]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } bool isConstexpr() const { return IfStmtBits.IsConstexpr; } void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; } bool isObjCAvailabilityCheck() const; SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. class SwitchStmt : public Stmt { SourceLocation SwitchLoc; enum { INIT, VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements and, if the // SwitchStmt is a switch on an enum value, records whether all the enum // values were covered by CaseStmts. The coverage information value is meant // to be a hint for possible clients. llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase; public: SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond); /// Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) {} /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Stmt *getInit() { return SubExprs[INIT]; } const Stmt *getInit() const { return SubExprs[INIT]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); } /// Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase.getPointer()); FirstCase.setPointer(SC); } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { FirstCase.setInt(true); } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return FirstCase.getInt(); } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. class WhileStmt : public Stmt { SourceLocation WhileLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) {} /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { SourceLocation DoLoc; enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { SourceLocation ForLoc; enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. 
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. 
class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// Build an empty break statement. 
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// Build an empty return expression. explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) {} const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. 
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. 
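// (Added commentary, not in the original header: Exprs stores all output
// expressions first and all input expressions after them, which is why
// begin_inputs() above starts at &Exprs[0] + NumOutputs. With 2 outputs and
// 3 inputs, for example, input 0 lives at Exprs[2] and input 2 at Exprs[4].)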
using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. 
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. 
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* 
Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. 
class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variables captured, including 'this'. unsigned NumCaptures; /// The pointer part is the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
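// --- Appended illustrative sketch; not part of the original Stmt.h. ---
// Every statement class above supplies the same two hooks: classof(), which
// is what llvm::isa<> / llvm::dyn_cast<> dispatch on, and children(), which
// yields the child_range that generic AST walks consume. Assuming only those
// interfaces, a recursive walk that collects every ReturnStmt in a statement
// tree could look like this (collectReturns is a hypothetical helper):

#include "clang/AST/Stmt.h"
#include "llvm/ADT/SmallVector.h"

static void collectReturns(clang::Stmt *S,
                           llvm::SmallVectorImpl<clang::ReturnStmt *> &Out) {
  if (!S)
    return; // children() may yield null slots (e.g. a missing else branch)
  if (auto *RS = llvm::dyn_cast<clang::ReturnStmt>(S))
    Out.push_back(RS);                       // resolved via ReturnStmt::classof
  for (clang::Stmt *Child : S->children())   // each class's child_range
    collectReturns(Child, Out);
}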
GB_binop__band_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__band_int16) // A.*B function (eWiseMult): GB (_AemultB_08__band_int16) // A.*B function (eWiseMult): GB (_AemultB_02__band_int16) // A.*B function (eWiseMult): GB (_AemultB_04__band_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__band_int16) // C+=b function (dense accum): GB (_Cdense_accumb__band_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int16) // C=scalar+B GB (_bind1st__band_int16) // C=scalar+B' GB (_bind1st_tran__band_int16) // C=A+scalar GB (_bind2nd__band_int16) // C=A'+scalar GB (_bind2nd_tran__band_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) & (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_INT16 || GxB_NO_BAND_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__band_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__band_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__band_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__band_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__band_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__band_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
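// (Added note: for BAND_INT16 this #else branch is the live one, since
// GB_BINOP_FLIP is defined as 0 above -- z = x & y is commutative -- so
// GB_FLIPPED stays pinned to 0 for the template that follows.)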
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__band_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__band_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__band_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) & (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__band_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) & (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) & (aij) ; \ } GrB_Info GB (_bind1st_tran__band_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB (_bind2nd_tran__band_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
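//------------------------------------------------------------------------------
// appended sketch: what BAND_INT16 computes (not part of the generated file)
//------------------------------------------------------------------------------

// A minimal, self-contained mirror of the _bind2nd__band_int16 loop above:
// a plain byte array stands in for the GBB bitmap test and direct indexing
// stands in for GBX, so the arrays Ax, Ab, and Cx below are made-up example
// data, not SuiteSparse data structures.

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int16_t Ax [4] = { 0x00FF, 0x0F0F, 0x1234, -1 } ;
    int8_t  Ab [4] = { 1, 1, 0, 1 } ;       // bitmap: entry 2 is not present
    int16_t y = 0x00F0 ;
    int16_t Cx [4] ;
    for (int p = 0 ; p < 4 ; p++)
    {
        if (!Ab [p]) continue ;              // skip entries absent from the bitmap
        Cx [p] = (int16_t) (Ax [p] & y) ;    // cij = aij & y
        printf ("Cx [%d] = 0x%04x\n", p, (unsigned) (uint16_t) Cx [p]) ;
    }
    return (0) ;
}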
5239.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel num_threads(4) { #pragma omp for schedule(dynamic, 8) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp for private (j) schedule(dynamic, 8) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; /* y is shared by every iteration of the parallel i-loop, so its update must be atomic to avoid a data race between threads */ for (j = 0; j < _PB_NY; j++) {
#pragma omp atomic
y[j] = y[j] + A[i][j] * tmp[i]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
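/* --- Appended sketch; not part of the original benchmark. On OpenMP 4.5+
   compilers, an array-section reduction is an alternative to the per-element
   atomic update used in kernel_atax above: each thread accumulates into a
   private copy of y, and the copies are combined once when the loop ends.
   A self-contained toy version (sizes and data below are made up): */
#include <stdio.h>
#define N 8
int main()
{
  double A[N][N], x[N], y[N], tmp;
  int i, j;
  for (i = 0; i < N; i++) {
    x[i] = i; y[i] = 0.0;
    for (j = 0; j < N; j++) A[i][j] = 1.0 / (i + j + 1);
  }
#pragma omp parallel for private(j, tmp) reduction(+:y[0:N])
  for (i = 0; i < N; i++) {
    tmp = 0.0;
    for (j = 0; j < N; j++) tmp += A[i][j] * x[j];
    for (j = 0; j < N; j++) y[j] += A[i][j] * tmp; /* race-free: y is reduced */
  }
  for (i = 0; i < N; i++) printf("%f\n", y[i]);
  return 0;
}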
build_HF_mat.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <omp.h> #include "linalg_lib_wrapper.h" #include "utils.h" #include "TinyDFT_typedef.h" #include "build_HF_mat.h" #include "ket_sp_list.h" #include "acc_JKmat.h" #include "libCMS.h" void TinyDFT_build_Hcore_S_X_mat(TinyDFT_p TinyDFT, double *Hcore_mat, double *S_mat, double *X_mat) { assert(TinyDFT != NULL); int nbf = TinyDFT->nbf; int nshell = TinyDFT->nshell; int mat_size = TinyDFT->mat_size; int *shell_bf_sind = TinyDFT->shell_bf_sind; int *shell_bf_num = TinyDFT->shell_bf_num; Simint_p simint = TinyDFT->simint; BasisSet_p basis = TinyDFT->basis; // Compute core Hamiltonian and overlap matrix memset(Hcore_mat, 0, DBL_MSIZE * mat_size); memset(S_mat, 0, DBL_MSIZE * mat_size); #pragma omp parallel for schedule(dynamic) for (int M = 0; M < nshell; M++) { int tid = omp_get_thread_num(); for (int N = 0; N < nshell; N++) { int nint, offset, nrows, ncols; double *integrals, *S_ptr, *Hcore_ptr; offset = shell_bf_sind[M] * nbf + shell_bf_sind[N]; S_ptr = S_mat + offset; Hcore_ptr = Hcore_mat + offset; nrows = shell_bf_num[M]; ncols = shell_bf_num[N]; // Compute the contribution of current shell pair to overlap matrix CMS_Simint_calc_pair_ovlp(simint, tid, M, N, &integrals, &nint); if (nint > 0) copy_matrix_block(sizeof(double), nrows, ncols, integrals, ncols, S_ptr, nbf); // Compute the contribution of current shell pair to core Hamiltonian matrix CMS_Simint_calc_pair_Hcore(basis, simint, tid, M, N, &integrals, &nint); if (nint > 0) copy_matrix_block(sizeof(double), nrows, ncols, integrals, ncols, Hcore_ptr, nbf); } } // Construct basis transformation matrix double *workbuf = (double*) malloc(sizeof(double) * (2 * mat_size + nbf)); assert(workbuf != NULL); double *U_mat = workbuf; double *U0_mat = U_mat + mat_size; double *eigval = U0_mat + mat_size; // [U, D] = eig(S); memcpy(U_mat, S_mat, DBL_MSIZE * mat_size); LAPACKE_dsyev(LAPACK_ROW_MAJOR, 'V', 'U', nbf, U_mat, nbf, eigval); // U_mat will be overwritten by eigenvectors // X = U * D^{-1/2} * U^T memcpy(U0_mat, U_mat, DBL_MSIZE * mat_size); int cnt = 0; double S_ev_thres = 1.0e-5; // This is the default threshold used in NWChem for (int i = 0; i < nbf; i++) { if (eigval[i] >= S_ev_thres) { eigval[i] = 1.0 / sqrt(eigval[i]); cnt++; } else { eigval[i] = 0.0; } } printf("Eliminating linear dependency in overlap matrix S: %d eigenvalues < %.2e are removed\n", nbf - cnt, S_ev_thres); for (int i = 0; i < nbf; i++) { #pragma omp simd for (int j = 0; j < nbf; j++) U0_mat[i * nbf + j] *= eigval[j]; } cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasTrans, nbf, nbf, nbf, 1.0, U0_mat, nbf, U_mat, nbf, 0.0, X_mat, nbf ); free(workbuf); } // Get the final J and K matrices: J = (J + J^T) / 2, K = (K + K^T) / 2 static void TinyDFT_finalize_JKmat(const int nbf, double *J_mat, double *K_mat, const int build_J, const int build_K) { if (build_J == 1 && build_K == 1) { #pragma omp for schedule(dynamic) for (int irow = 0; irow < nbf; irow++) { for (int icol = irow + 1; icol < nbf; icol++) { int idx1 = irow * nbf + icol; int idx2 = icol * nbf + irow; double Jval = (J_mat[idx1] + J_mat[idx2]) * 0.5; double Kval = (K_mat[idx1] + K_mat[idx2]) * 0.5; J_mat[idx1] = Jval; J_mat[idx2] = Jval; K_mat[idx1] = Kval; K_mat[idx2] = Kval; } } } if (build_J == 1 && build_K == 0) { #pragma omp for schedule(dynamic) for (int irow = 0; irow < nbf; irow++) { for (int icol = irow + 1; icol < nbf; icol++) { int idx1 = irow * nbf + icol; int idx2 = icol * nbf + irow; double Jval =
(J_mat[idx1] + J_mat[idx2]) * 0.5; J_mat[idx1] = Jval; J_mat[idx2] = Jval; } } } if (build_J == 0 && build_K == 1) { #pragma omp for schedule(dynamic) for (int irow = 0; irow < nbf; irow++) { for (int icol = irow + 1; icol < nbf; icol++) { int idx1 = irow * nbf + icol; int idx2 = icol * nbf + irow; double Kval = (K_mat[idx1] + K_mat[idx2]) * 0.5; K_mat[idx1] = Kval; K_mat[idx2] = Kval; } } } } static void TinyDFT_JKblkmat_to_JKmat( const int nshell, const int nbf, const int *shell_bf_num, const int *shell_bf_sind, const int *blk_mat_ptr, const double *J_blk_mat, const double *K_blk_mat, double *J_mat, double *K_mat, const int build_J, const int build_K ) { if (build_J) { #pragma omp for for (int i = 0; i < nshell; i++) { for (int j = 0; j < nshell; j++) { int Jblk_offset = blk_mat_ptr[i * nshell + j]; int J_offset = shell_bf_sind[i] * nbf + shell_bf_sind[j]; copy_matrix_block( sizeof(double), shell_bf_num[i], shell_bf_num[j], J_blk_mat + Jblk_offset, shell_bf_num[j], J_mat + J_offset, nbf ); } } } if (build_K) { #pragma omp for for (int i = 0; i < nshell; i++) { for (int j = 0; j < nshell; j++) { int Kblk_offset = blk_mat_ptr[i * nshell + j]; int K_offset = shell_bf_sind[i] * nbf + shell_bf_sind[j]; copy_matrix_block( sizeof(double), shell_bf_num[i], shell_bf_num[j], K_blk_mat + Kblk_offset, shell_bf_num[j], K_mat + K_offset, nbf ); } } } } static void TinyDFT_Dmat_to_Dblkmat( const int nshell, const int nbf, const int *shell_bf_num, const int *shell_bf_sind, const int *blk_mat_ptr, const double *D_mat, double *D_blk_mat ) { #pragma omp for for (int i = 0; i < nshell; i++) { for (int j = 0; j < nshell; j++) { int Dblk_offset = blk_mat_ptr[i * nshell + j]; int D_offset = shell_bf_sind[i] * nbf + shell_bf_sind[j]; copy_matrix_block( sizeof(double), shell_bf_num[i], shell_bf_num[j], D_mat + D_offset, nbf, D_blk_mat + Dblk_offset, shell_bf_num[j] ); } } } void TinyDFT_build_JKmat(TinyDFT_p TinyDFT, const double *D_mat, double *J_mat, double *K_mat) { int nbf = TinyDFT->nbf; int nshell = TinyDFT->nshell; int num_valid_sp = TinyDFT->num_valid_sp; int max_dim = TinyDFT->max_dim; int mat_size = TinyDFT->mat_size; int max_JKacc_buf = TinyDFT->max_JKacc_buf; int *shell_bf_num = TinyDFT->shell_bf_num; int *shell_bf_sind = TinyDFT->shell_bf_sind; int *valid_sp_lid = TinyDFT->valid_sp_lid; int *valid_sp_rid = TinyDFT->valid_sp_rid; int *blk_mat_ptr = TinyDFT->blk_mat_ptr; int *Mpair_flag = TinyDFT->Mpair_flag; int *Npair_flag = TinyDFT->Npair_flag; double scrtol2 = TinyDFT->shell_scrtol2; double *sp_scrval = TinyDFT->sp_scrval; double *J_blk_mat = TinyDFT->J_blk_mat; double *K_blk_mat = TinyDFT->K_blk_mat; double *D_blk_mat = TinyDFT->D_blk_mat; double *FM_strip_buf = TinyDFT->FM_strip_buf; double *FN_strip_buf = TinyDFT->FN_strip_buf; Simint_p simint = TinyDFT->simint; int build_J = (J_mat == NULL) ? 0 : 1; int build_K = (K_mat == NULL) ? 
0 : 1; if (build_J == 0 && build_K == 0) return; if (build_J) memset(J_blk_mat, 0, DBL_MSIZE * mat_size); if (build_K) memset(K_blk_mat, 0, DBL_MSIZE * mat_size); #pragma omp parallel { int tid = omp_get_thread_num(); TinyDFT_Dmat_to_Dblkmat( nshell, nbf, shell_bf_num, shell_bf_sind, blk_mat_ptr, D_mat, D_blk_mat ); // Create ERI batching auxiliary data structures // Ket-side shell pair lists that needs to be computed ThreadKetShellpairLists_p thread_ksp_lists; create_ThreadKetShellpairLists(&thread_ksp_lists); // Simint multi_shellpair buffer for batched ERI computation void *thread_multi_shellpair; CMS_Simint_create_multi_sp(&thread_multi_shellpair); double *thread_FM_strip_buf = FM_strip_buf + tid * nbf * max_dim; double *thread_FN_strip_buf = FN_strip_buf + tid * nbf * max_dim; int *thread_Mpair_flag = Mpair_flag + tid * nshell; int *thread_Npair_flag = Npair_flag + tid * nshell; #pragma omp for schedule(dynamic) for (int MN = 0; MN < num_valid_sp; MN++) { int M = valid_sp_lid[MN]; int N = valid_sp_rid[MN]; double scrval1 = sp_scrval[M * nshell + N]; double *J_MN_buf = TinyDFT->JKacc_buf + tid * max_JKacc_buf; double *J_MN = J_blk_mat + blk_mat_ptr[M * nshell + N]; int dimM = shell_bf_num[M], dimN = shell_bf_num[N]; if (build_J) memset(J_MN_buf, 0, sizeof(double) * dimM * dimN); if (build_K) { memset(thread_FM_strip_buf, 0, sizeof(double) * nbf * max_dim); memset(thread_FN_strip_buf, 0, sizeof(double) * nbf * max_dim); memset(thread_Mpair_flag, 0, sizeof(int) * nshell); memset(thread_Npair_flag, 0, sizeof(int) * nshell); } for (int PQ = 0; PQ < num_valid_sp; PQ++) { int P = valid_sp_lid[PQ]; int Q = valid_sp_rid[PQ]; double scrval2 = sp_scrval[P * nshell + Q]; // Symmetric uniqueness check, from GTFock if ((M > P && (M + P) % 2 == 1) || (M < P && (M + P) % 2 == 0)) continue; if ((M == P) && ((N > Q && (N + Q) % 2 == 1) || (N < Q && (N + Q) % 2 == 0))) continue; // Shell screening if (fabs(scrval1 * scrval2) <= scrtol2) continue; // Push ket-side shell pair to corresponding list int ket_id = CMS_Simint_get_sp_AM_idx(simint, P, Q); KetShellpairList_p dst_sp_list = &thread_ksp_lists->ket_shellpair_lists[ket_id]; add_shellpair_to_KetShellPairList(dst_sp_list, P, Q); // If the ket-side shell pair list we just used is full, handle it if (dst_sp_list->npairs == MAX_LIST_SIZE) { double *thread_batch_eris; int thread_nints; // Compute batched ERIs CMS_Simint_calc_shellquartet_batch( simint, tid, M, N, dst_sp_list->npairs, dst_sp_list->P_list, dst_sp_list->Q_list, &thread_batch_eris, &thread_nints, &thread_multi_shellpair ); // Accumulate ERI results to global matrices if (thread_nints > 0) { double st = get_wtime_sec(); acc_JKmat_with_ket_sp_list( TinyDFT, tid, M, N, dst_sp_list->npairs, dst_sp_list->P_list, dst_sp_list->Q_list, thread_batch_eris, thread_nints, thread_FM_strip_buf, thread_FN_strip_buf, thread_Mpair_flag, thread_Npair_flag, build_J, build_K ); double et = get_wtime_sec(); if (tid == 0) CMS_Simint_add_accF_timer(simint, et - st); } // Reset the computed ket-side shell pair list dst_sp_list->npairs = 0; } // End of "if (dst_sp_list->npairs == MAX_LIST_SIZE)" } // End of PQ loop // Handles all non-empty ket-side shell pair lists for (int ket_id = 0; ket_id < MAX_AM_PAIRS; ket_id++) { KetShellpairList_p dst_sp_list = &thread_ksp_lists->ket_shellpair_lists[ket_id]; if (dst_sp_list->npairs > 0) { double *thread_batch_eris; int thread_nints; // Compute batched ERIs CMS_Simint_calc_shellquartet_batch( simint, tid, M, N, dst_sp_list->npairs, dst_sp_list->P_list, dst_sp_list->Q_list, 
&thread_batch_eris, &thread_nints, &thread_multi_shellpair ); // Accumulate ERI results to global matrices if (thread_nints > 0) { double st = get_wtime_sec(); acc_JKmat_with_ket_sp_list( TinyDFT, tid, M, N, dst_sp_list->npairs, dst_sp_list->P_list, dst_sp_list->Q_list, thread_batch_eris, thread_nints, thread_FM_strip_buf, thread_FN_strip_buf, thread_Mpair_flag, thread_Npair_flag, build_J, build_K ); double et = get_wtime_sec(); if (tid == 0) CMS_Simint_add_accF_timer(simint, et - st); } // Reset the computed ket-side shell pair list dst_sp_list->npairs = 0; } // End of "if (dst_sp_list->npairs > 0)" } // End of ket_id loop // Accumulate thread-local J and K results to global J and K mat if (build_J) atomic_add_vector(J_MN, J_MN_buf, dimM * dimN); if (build_K) { int FM_strip_offset = blk_mat_ptr[M * nshell]; int FN_strip_offset = blk_mat_ptr[N * nshell]; for (int iPQ = 0; iPQ < nshell; iPQ++) { int dim_iPQ = shell_bf_num[iPQ]; if (thread_Mpair_flag[iPQ]) { int MPQ_blk_ptr = blk_mat_ptr[M * nshell + iPQ]; double *K_blk_ptr = K_blk_mat + MPQ_blk_ptr; double *thread_FM_strip_blk_ptr = thread_FM_strip_buf + MPQ_blk_ptr - FM_strip_offset; atomic_add_vector(K_blk_ptr, thread_FM_strip_blk_ptr, dimM * dim_iPQ); } if (thread_Npair_flag[iPQ]) { int NPQ_blk_ptr = blk_mat_ptr[N * nshell + iPQ]; double *K_blk_ptr = K_blk_mat + NPQ_blk_ptr; double *thread_FN_strip_blk_ptr = thread_FN_strip_buf + NPQ_blk_ptr - FN_strip_offset; atomic_add_vector(K_blk_ptr, thread_FN_strip_blk_ptr, dimN * dim_iPQ); } } // End of iPQ loop } // End of "if (build_K)" } // End of MN loop TinyDFT_JKblkmat_to_JKmat( nshell, nbf, shell_bf_num, shell_bf_sind, blk_mat_ptr, J_blk_mat, K_blk_mat, J_mat, K_mat, build_J, build_K ); TinyDFT_finalize_JKmat(nbf, J_mat, K_mat, build_J, build_K); CMS_Simint_free_multi_sp(thread_multi_shellpair); free_ThreadKetShellpairLists(thread_ksp_lists); } // End of "#pragma omp parallel" } void TinyDFT_calc_HF_energy( const int mat_size, const double *D_mat, const double *Hcore_mat, const double *J_mat, const double *K_mat, double *E_one_elec, double *E_two_elec, double *E_HF_exchange ) { double Eoe = 0.0, Ete = 0.0, Exc = 0.0; if (K_mat != NULL) { #pragma omp parallel for simd reduction(+:Eoe, Ete, Exc) for (int i = 0; i < mat_size; i++) { Eoe += D_mat[i] * Hcore_mat[i]; Ete += D_mat[i] * J_mat[i]; Exc += D_mat[i] * K_mat[i]; } Eoe *= 2.0; Ete *= 2.0; Exc *= -1.0; *E_one_elec = Eoe; *E_two_elec = Ete; *E_HF_exchange = Exc; } else { #pragma omp parallel for simd reduction(+:Eoe, Ete) for (int i = 0; i < mat_size; i++) { Eoe += D_mat[i] * Hcore_mat[i]; Ete += D_mat[i] * J_mat[i]; } Eoe *= 2.0; Ete *= 2.0; *E_one_elec = Eoe; *E_two_elec = Ete; } }
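// --- Appended sketch; not part of TinyDFT. The GTFock-style parity test in
// the PQ loop of TinyDFT_build_JKmat above assigns each unordered pair of
// shell-pair indices to exactly one orientation, so every ERI quartet is
// enumerated once. This standalone check (skip() is a hypothetical helper
// that copies the first parity test) verifies the count for a tiny case:
// out of n*n ordered combinations, n*(n-1)/2 + n survive.
#include <stdio.h>

static int skip(int M, int P)
{
    // the first uniqueness test from the PQ loop in TinyDFT_build_JKmat
    return (M > P && (M + P) % 2 == 1) || (M < P && (M + P) % 2 == 0);
}

int main(void)
{
    int n = 4, kept = 0;
    for (int M = 0; M < n; M++)
        for (int P = 0; P < n; P++)
            if (!skip(M, P)) kept++;
    // for M != P exactly one of (M,P)/(P,M) is kept; all diagonals are kept
    printf("kept = %d (expect %d)\n", kept, n * (n - 1) / 2 + n);
    return 0;
}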
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Constitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to % create a 640x480 image from unsigned red-green-blue character data, use: % % image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception); % % The format of the ConstituteImage method is: % % Image *ConstituteImage(const size_t columns,const size_t rows, % const char *map,const StorageType storage,const void *pixels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: width in pixels of the image.
% % o rows: height in pixels of the image. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose % from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConstituteImage(const size_t columns,const size_t rows, const char *map,const StorageType storage,const void *pixels, ExceptionInfo *exception) { Image *image; MagickBooleanType status; ssize_t i; size_t length; /* Allocate image structure. */ assert(map != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map); assert(pixels != (void *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage((ImageInfo *) NULL,exception); if (image == (Image *) NULL) return((Image *) NULL); switch (storage) { case CharPixel: image->depth=8*sizeof(unsigned char); break; case DoublePixel: image->depth=8*sizeof(double); break; case FloatPixel: image->depth=8*sizeof(float); break; case LongPixel: image->depth=8*sizeof(unsigned long); break; case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break; case ShortPixel: image->depth=8*sizeof(unsigned short); break; default: break; } length=strlen(map); for (i=0; i < (ssize_t) length; i++) { switch (map[i]) { case 'a': case 'A': case 'O': case 'o': { image->alpha_trait=BlendPixelTrait; break; } case 'C': case 'c': case 'm': case 'M': case 'Y': case 'y': case 'K': case 'k': { image->colorspace=CMYKColorspace; break; } case 'I': case 'i': { image->colorspace=GRAYColorspace; break; } default: { if (length == 1) image->colorspace=GRAYColorspace; break; } } } status=SetImageExtent(image,columns,rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImage() returns all the properties of an image or image sequence % except for the pixels. It is much faster and consumes far less memory % than ReadImage(). On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the PingImage method is: % % Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Ping the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. 
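%
% For example (an illustrative usage sketch, not from the original source):
%
% image=PingImage(image_info,exception);
% if (image != (Image *) NULL)
% {
% (void) fprintf(stdout,"%.20gx%.20g\n",(double) image->columns,
% (double) image->rows);
% image=DestroyImage(image);
% }
%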
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t PingStream(const Image *magick_unused(image), const void *magick_unused(pixels),const size_t columns) { magick_unreferenced(image); magick_unreferenced(pixels); return(columns); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport Image *PingImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; ImageInfo *ping_info; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); ping_info=CloneImageInfo(image_info); ping_info->ping=MagickTrue; image=ReadStream(ping_info,&PingStream,exception); if (image != (Image *) NULL) { ResetTimer(&image->timer); if (ping_info->verbose != MagickFalse) (void) IdentifyImage(image,stdout,MagickFalse,exception); } ping_info=DestroyImageInfo(ping_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImages() pings one or more images and returns them as an image list. % % The format of the PingImage method is: % % Image *PingImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
*/ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. 
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(read_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. 
*/ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickTimeExtent]; const char *option; const StringInfo *profile; ssize_t option_type; static const char *source_date_epoch = (const char *) NULL; static MagickBooleanType epoch_initalized = MagickFalse; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if ((*magick_path == '\0') && (*next->magick == '\0')) (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; (void) GetImageProperty(next,"exif:*",exception); (void) GetImageProperty(next,"icc:*",exception); (void) GetImageProperty(next,"iptc:*",exception); (void) 
GetImageProperty(next,"xmp:*",exception); value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) 
profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); if (epoch_initalized == MagickFalse) { source_date_epoch=getenv("SOURCE_DATE_EPOCH"); epoch_initalized=MagickTrue; } if (source_date_epoch == (const char *) NULL) { (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); } option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); if (GetBlobError(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char read_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Read image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; (void) SetImageOption(read_info,"filename",filename); (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(read_info,(Image *) NULL,filename, (int) read_info->scene,read_filename,exception); if (LocaleCompare(read_filename,read_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
*/
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   I n l i n e   I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file on disk, the name is defined by the filename member
%  of the image structure.  WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
% % The format of the WriteImage method is: % % MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; const MagickInfo *magick_info; EncodeImageHandler *encoder; ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType status, temporary; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); sans_exception=AcquireExceptionInfo(); write_info=CloneImageInfo(image_info); (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) SetImageInfo(write_info,1,sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); /* Call appropriate image writer based on image type. */ magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) image->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } (void) SyncImageProfiles(image); DisassociateImageStream(image); option=GetImageOption(image_info,"delegate:bimodal"); if ((IsStringTrue(option) != MagickFalse) && (write_info->page == (char *) NULL) && (GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) == (Image *) NULL) && (IsTaintImage(image) == MagickFalse) ) { delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception); if ((delegate_info != (const DelegateInfo *) NULL) && (GetDelegateMode(delegate_info) == 0) && (IsPathAccessible(image->magick_filename) != MagickFalse)) { /* Process image with bi-modal delegate. 
*/ (void) CopyMagickString(image->filename,image->magick_filename, MagickPathExtent); status=InvokeDelegate(write_info,image,image->magick, write_info->magick,exception); write_info=DestroyImageInfo(write_info); (void) CopyMagickString(image->filename,filename,MagickPathExtent); return(status); } } status=MagickFalse; temporary=MagickFalse; if ((magick_info != (const MagickInfo *) NULL) && (GetMagickEncoderSeekableStream(magick_info) != MagickFalse)) { char image_filename[MagickPathExtent]; (void) CopyMagickString(image_filename,image->filename,MagickPathExtent); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); (void) CopyMagickString(image->filename, image_filename,MagickPathExtent); if (status != MagickFalse) { if (IsBlobSeekable(image) == MagickFalse) { /* A seekable stream is required by the encoder. */ write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) AcquireUniqueFilename(image->filename); temporary=MagickTrue; } (void) CloseBlob(image); } } encoder=GetImageEncoder(magick_info); if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception); if (delegate_info != (DelegateInfo *) NULL) { /* Process the image with delegate. */ *write_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(write_info,image,(char *) NULL, write_info->magick,exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); (void) CopyMagickString(image->filename,filename,MagickPathExtent); } else { sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((write_info->affirm == MagickFalse) && (magick_info == (const MagickInfo *) NULL)) { (void) CopyMagickString(write_info->magick,image->magick, MagickPathExtent); magick_info=GetMagickInfo(write_info->magick,exception); } encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) { char extension[MagickPathExtent]; GetPathComponent(image->filename,ExtensionPath,extension); if (*extension != '\0') magick_info=GetMagickInfo(extension,exception); else magick_info=GetMagickInfo(image->magick,exception); (void) CopyMagickString(image->filename,filename, MagickPathExtent); encoder=GetImageEncoder(magick_info); } if (encoder == (EncodeImageHandler *) NULL) { magick_info=GetMagickInfo(image->magick,exception); encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoEncodeDelegateForThisImageFormat", "`%s'",write_info->magick); } if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. 
*/ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights, exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } } } if (temporary != MagickFalse) { /* Copy temporary image file to permanent. */ status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception); if (status != MagickFalse) { (void) RelinquishUniqueFileResource(write_info->filename); status=ImageToFile(image,write_info->filename,exception); } (void) CloseBlob(image); (void) RelinquishUniqueFileResource(image->filename); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); } if ((LocaleCompare(write_info->magick,"info") != 0) && (write_info->verbose != MagickFalse)) (void) IdentifyImage(image,stdout,MagickFalse,exception); write_info=DestroyImageInfo(write_info); if (GetBlobError(image) != MagickFalse) ThrowWriterException(FileOpenError,"UnableToWriteFile"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImages() writes an image sequence into one or more files. While % WriteImage() can write an image sequence, it is limited to writing % the sequence into a single file using a format which supports multiple % frames. WriteImages(), however, does not have this limitation, instead it % generates multiple output files if necessary (or when requested). When % ImageInfo's adjoin flag is set to MagickFalse, the file name is expected % to include a printf-style formatting string for the frame number (e.g. % "image%02d.png"). % % The format of the WriteImages method is: % % MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o images: the image list. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
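%
%  For example (an editor's illustrative sketch, not upstream documentation;
%  it assumes image_info, images, and exception were acquired as usual),
%  writing each frame of a sequence to its own file:
%
%      image_info->adjoin=MagickFalse;
%      (void) WriteImages(image_info,images,"frame-%02d.png",exception);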
% */ MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info, Image *images,const char *filename,ExceptionInfo *exception) { #define WriteImageTag "Write/Image" ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType proceed; MagickOffsetType progress; MagickProgressMonitor progress_monitor; MagickSizeType number_images; MagickStatusType status; Image *p; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); write_info=CloneImageInfo(image_info); *write_info->magick='\0'; images=GetFirstImageInList(images); if (filename != (const char *) NULL) for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) (void) CopyMagickString(p->filename,filename,MagickPathExtent); (void) CopyMagickString(write_info->filename,images->filename, MagickPathExtent); sans_exception=AcquireExceptionInfo(); (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images), sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent); p=images; for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p)) { Image *next; next=GetNextImageInList(p); if (next == (Image *) NULL) break; if (p->scene >= next->scene) { ssize_t i; /* Generate consistent scene numbers. */ i=(ssize_t) images->scene; for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) p->scene=(size_t) i++; break; } } /* Write images. */ status=MagickTrue; progress_monitor=(MagickProgressMonitor) NULL; progress=0; number_images=GetImageListLength(images); for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) { if (number_images != 1) progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL, p->client_data); status&=WriteImage(write_info,p,exception); if (number_images != 1) (void) SetImageProgressMonitor(p,progress_monitor,p->client_data); if (write_info->adjoin != MagickFalse) break; if (number_images != 1) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(p,WriteImageTag,progress,number_images); if (proceed == MagickFalse) break; } } write_info=DestroyImageInfo(write_info); return(status != 0 ? MagickTrue : MagickFalse); }
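/*
  Editor's note: the following stand-alone program is an illustrative usage
  sketch, not part of constitute.c.  It exercises ConstituteImage() and
  WriteImages() exactly as documented above.  The file names "demo.c" and
  "demo.png" and the 2x2 pixel buffer are hypothetical; it assumes MagickCore
  is installed and the program is linked against it, e.g.:

      cc demo.c `pkg-config --cflags --libs MagickCore`
*/
#include <stdio.h>
#include "MagickCore/MagickCore.h"

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    2x2 RGB image in scanline order: red, green / blue, white.
  */
  static const unsigned char
    pixels[2*2*3] =
    {
      255,   0,   0,    0, 255,   0,
        0,   0, 255,  255, 255, 255
    };

  MagickCoreGenesis("demo",MagickFalse);
  exception=AcquireExceptionInfo();
  image=ConstituteImage(2,2,"RGB",CharPixel,pixels,exception);
  if (image == (Image *) NULL)
    {
      CatchException(exception);
      return(1);
    }
  image_info=AcquireImageInfo();
  status=WriteImages(image_info,image,"demo.png",exception);
  if (status == MagickFalse)
    CatchException(exception);
  image=DestroyImageList(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(status == MagickFalse ? 1 : 0);
}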
GB_unaryop__ainv_uint16_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint16_int64 // op(A') function: GB_tran__ainv_uint16_int64 // C type: uint16_t // A type: int64_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint16_int64 ( uint16_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint16_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
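/*
    Editor's note: an illustrative, self-contained sketch -- not part of the
    generated kernel above and not a GraphBLAS API.  It replicates what
    GB_unop__ainv_uint16_int64 computes per entry (GB_CASTING followed by
    GB_OP): typecast int64_t to uint16_t, then take the additive inverse in
    uint16_t's modular (mod 2^16) arithmetic.  Compiles stand-alone, e.g.
    `cc ainv_demo.c`, where "ainv_demo.c" is a hypothetical file name.
*/
#include <stdio.h>
#include <stdint.h>

int main (void)
{
    int64_t  Ax [3] = { 1, -1, 70000 } ;
    uint16_t Cx [3] ;
    for (int p = 0 ; p < 3 ; p++)
    {
        uint16_t z = (uint16_t) Ax [p] ;    /* GB_CASTING: wraps mod 2^16 */
        Cx [p] = (uint16_t) (-z) ;          /* GB_OP: z -> -z, mod 2^16   */
    }
    /* prints 65535 (= -1 mod 2^16), then 1 (= -65535 mod 2^16),
       then 61072 (= -(70000 mod 2^16) mod 2^16) */
    for (int p = 0 ; p < 3 ; p++)
    {
        printf ("%u\n", (unsigned) Cx [p]) ;
    }
    return (0) ;
}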
bdd_sequential_base.h
#pragma once #include <vector> #include <array> #include <Eigen/SparseCore> #include "bdd_collection/bdd_collection.h" #include "two_dimensional_variable_array.hxx" #include <fstream> #include <cstdlib> #include <filesystem> #include <unordered_set> #include "time_measure_util.h" #include "atomic_ref.hpp" namespace LPMP { // store BDDs one after the other. // allows for efficient computation of min marginals and parallel mma template<typename BDD_BRANCH_NODE> class bdd_sequential_base { public: using value_type = typename BDD_BRANCH_NODE::value_type; bdd_sequential_base(BDD::bdd_collection& bdd_col) { add_bdds(bdd_col); } void add_bdds(BDD::bdd_collection& bdd_col); size_t nr_bdds() const; size_t nr_bdds(const size_t var) const; size_t nr_variables() const; size_t nr_variables(const size_t bdd_nr) const; size_t variable(const size_t bdd_nr, const size_t bdd_index) const; size_t nr_bdd_variables() const; double lower_bound(); using vector_type = Eigen::Matrix<typename BDD_BRANCH_NODE::value_type, Eigen::Dynamic, 1>; vector_type lower_bound_per_bdd(); void forward_run(); void backward_run(); void backward_run(const size_t bdd_nr); //two_dim_variable_array<std::array<value_type,2>> min_marginals(); two_dim_variable_array<std::array<double,2>> min_marginals(); using min_marginal_type = Eigen::Matrix<typename BDD_BRANCH_NODE::value_type, Eigen::Dynamic, 2>; std::tuple<min_marginal_type, std::vector<char>> min_marginals_stacked(); template<typename COST_ITERATOR> void update_costs(COST_ITERATOR cost_lo_begin, COST_ITERATOR cost_lo_end, COST_ITERATOR cost_hi_begin, COST_ITERATOR cost_hi_end); // TODO: remove these! // void update_costs(const two_dim_variable_array<std::array<value_type,2>>& delta); void update_costs(const min_marginal_type& delta); vector_type get_costs(); void update_costs(const vector_type& delta); ///////////////////////// template<typename ITERATOR> void fix_variables(ITERATOR zero_fixations_begin, ITERATOR zero_fixations_end, ITERATOR one_fixations_begin, ITERATOR one_fixations_end); // make a step that is guaranteed to be non-decreasing in the lower bound. void diffusion_step(const two_dim_variable_array<std::array<value_type,2>>& min_margs, const value_type damping_step = 1.0); // compute incremental min marginals and perform min-marginal averaging subsequently void parallel_mma(); void forward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute); value_type backward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute); void distribute_delta(); // Both operations below are inverses of each other // Given elements in order bdd_nr/bdd_index, transpose to variable/bdd_index with same variable. template<typename T> two_dim_variable_array<T> transpose_to_var_order(const two_dim_variable_array<T>& m) const; // Given elements in order var/bdd_index with same variable, transpose to bdd_nr/bdd_index. 
template<typename T> two_dim_variable_array<T> transpose_to_bdd_order(const two_dim_variable_array<T>& m) const; Eigen::SparseMatrix<value_type> Lagrange_constraint_matrix() const; void export_graphviz(const char* filename); void export_graphviz(const std::string& filename); template<typename STREAM> void export_graphviz(STREAM& s, const size_t bdd_nr); private: enum class message_passing_state { after_forward_pass, after_backward_pass, none } message_passing_state_ = message_passing_state::none; enum class lower_bound_state { valid, invalid } lower_bound_state_ = lower_bound_state::invalid; double lower_bound_ = -std::numeric_limits<double>::infinity(); double constant_ = 0.0; double compute_lower_bound(); double compute_lower_bound_after_forward_pass(); double compute_lower_bound_after_backward_pass(); vector_type lower_bound_per_bdd_after_forward_pass(); vector_type lower_bound_per_bdd_after_backward_pass(); std::array<size_t,2> bdd_range(const size_t bdd_nr) const; std::array<size_t,2> bdd_index_range(const size_t bdd_nr, const size_t bdd_idx) const; std::vector<BDD_BRANCH_NODE> bdd_branch_nodes_; // holds ranges of bdd branch instructions of specific bdd with specific variable struct bdd_variable { size_t offset; size_t variable; }; two_dim_variable_array<bdd_variable> bdd_variables_; std::vector<size_t> nr_bdds_per_variable_; // for parallel mma std::vector<std::array<value_type,2>> mms_to_collect_; std::vector<std::array<value_type,2>> mms_to_distribute_; }; //////////////////// // implementation // //////////////////// template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdds() const { assert(bdd_variables_.size() > 0); return bdd_variables_.size() - 1; } template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_variables() const { return nr_bdds_per_variable_.size(); } template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdds(const size_t variable) const { assert(variable < nr_variables()); return nr_bdds_per_variable_[variable]; } template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_variables(const size_t bdd_nr) const { assert(bdd_nr < nr_bdds()); assert(bdd_variables_.size(bdd_nr) > 0); return bdd_variables_.size(bdd_nr) - 1; } template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::variable(const size_t bdd_nr, const size_t bdd_index) const { assert(bdd_nr < nr_bdds()); assert(bdd_index < nr_variables(bdd_nr)); return bdd_variables_(bdd_nr, bdd_index).variable; } template<typename BDD_BRANCH_NODE> size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdd_variables() const { return std::accumulate(nr_bdds_per_variable_.begin(), nr_bdds_per_variable_.end(), 0); } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::add_bdds(BDD::bdd_collection& bdd_col) { message_passing_state_ = message_passing_state::none; assert(bdd_branch_nodes_.size() == 0); // currently does not support incremental addition of BDDs bdd_branch_nodes_.clear(); const size_t total_nr_bdd_nodes = [&]() { size_t i=0; for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr) i += bdd_col.nr_bdd_nodes(bdd_nr)-2; // do not count terminal nodes return i; }(); bdd_branch_nodes_.reserve(total_nr_bdd_nodes); bdd_variables_.clear(); nr_bdds_per_variable_.clear(); const size_t nr_vars = [&]() { size_t max_v=0; for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr) max_v = std::max(max_v, bdd_col.min_max_variables(bdd_nr)[1]); return max_v+1; }(); 
nr_bdds_per_variable_.resize(nr_vars, 0); for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr) { assert(bdd_col.is_qbdd(bdd_nr)); assert(bdd_col.is_reordered(bdd_nr)); } for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr) { assert(bdd_col.is_qbdd(bdd_nr)); assert(bdd_col.is_reordered(bdd_nr)); std::vector<bdd_variable> cur_bdd_variables; cur_bdd_variables.push_back({bdd_branch_nodes_.size(), bdd_col.min_max_variables(bdd_nr)[0]}); // TODO: use min_variable for(auto bdd_it=bdd_col.cbegin(bdd_nr); bdd_it!=bdd_col.cend(bdd_nr); ++bdd_it) { const BDD::bdd_instruction& stored_bdd = *bdd_it; assert(!stored_bdd.is_terminal()); BDD_BRANCH_NODE bdd; if(bdd_col.get_bdd_instruction(stored_bdd.lo).is_botsink()) bdd.offset_low = BDD_BRANCH_NODE::terminal_0_offset; else if(bdd_col.get_bdd_instruction(stored_bdd.lo).is_topsink()) bdd.offset_low = BDD_BRANCH_NODE::terminal_1_offset; else { assert(bdd_col.offset(stored_bdd) < stored_bdd.lo); bdd.offset_low = stored_bdd.lo - bdd_col.offset(stored_bdd); } if(bdd_col.get_bdd_instruction(stored_bdd.hi).is_botsink()) bdd.offset_high = BDD_BRANCH_NODE::terminal_0_offset; else if(bdd_col.get_bdd_instruction(stored_bdd.hi).is_topsink()) bdd.offset_high = BDD_BRANCH_NODE::terminal_1_offset; else { assert(bdd_col.offset(stored_bdd) < stored_bdd.hi); bdd.offset_high = stored_bdd.hi - bdd_col.offset(stored_bdd); } if(bdd.offset_low == BDD_BRANCH_NODE::terminal_0_offset) bdd.low_cost = std::numeric_limits<decltype(bdd.low_cost)>::infinity(); if(bdd.offset_high == BDD_BRANCH_NODE::terminal_0_offset) bdd.high_cost = std::numeric_limits<decltype(bdd.high_cost)>::infinity(); if(stored_bdd.index != cur_bdd_variables.back().variable) cur_bdd_variables.push_back({bdd_branch_nodes_.size(), stored_bdd.index}); assert(bdd_branch_nodes_.size() < total_nr_bdd_nodes); bdd_branch_nodes_.push_back(bdd); } // assert(cur_bdd_variables.back().variable == bdd_col.min_max_variables(bdd_nr)[1]); // need not hold true, we accept differently ordered BDDs. 
            cur_bdd_variables.push_back({bdd_branch_nodes_.size(), std::numeric_limits<size_t>::max()}); // For extra delimiter at the end
            bdd_variables_.push_back(cur_bdd_variables.begin(), cur_bdd_variables.end());
            assert(bdd_variables_.size(bdd_nr) == bdd_col.variables(bdd_nr).size()+1);

            for(const auto [offset, v] : cur_bdd_variables)
            {
                assert(v < nr_bdds_per_variable_.size() || v == std::numeric_limits<size_t>::max());
                if(v != std::numeric_limits<size_t>::max())
                    nr_bdds_per_variable_[v]++;
            }
        }
        assert(bdd_branch_nodes_.size() == total_nr_bdd_nodes);

        // add last entry for offset
        std::vector<bdd_variable> tmp_bdd_variables;
        tmp_bdd_variables.push_back({bdd_branch_nodes_.size(), std::numeric_limits<size_t>::max()});
        bdd_variables_.push_back(tmp_bdd_variables.begin(), tmp_bdd_variables.end());
    }

    template<typename BDD_BRANCH_NODE>
    double bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound()
    {
        if(lower_bound_state_ == lower_bound_state::invalid)
            compute_lower_bound();
        assert(lower_bound_state_ == lower_bound_state::valid);
        return lower_bound_;
    }

    template<typename BDD_BRANCH_NODE>
    double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound()
    {
        if(message_passing_state_ == message_passing_state::after_backward_pass)
        {
            lower_bound_ = compute_lower_bound_after_backward_pass();
        }
        else if(message_passing_state_ == message_passing_state::after_forward_pass)
        {
            lower_bound_ = compute_lower_bound_after_forward_pass();
        }
        else if(message_passing_state_ == message_passing_state::none)
        {
            backward_run();
            lower_bound_ = compute_lower_bound_after_backward_pass();
        }

        lower_bound_state_ = lower_bound_state::valid;
        return lower_bound_;
    }

    template<typename BDD_BRANCH_NODE>
    double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound_after_backward_pass()
    {
        MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
        assert(message_passing_state_ == message_passing_state::after_backward_pass);
        double lb = constant_;

        // TODO: works only for non-split BDDs
        for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
        {
            const auto [first,last] = bdd_index_range(bdd_nr, 0);
            assert(first+1 == last);
            lb += bdd_branch_nodes_[first].m;
        }

        return lb;
    }

    template<typename BDD_BRANCH_NODE>
    double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound_after_forward_pass()
    {
        MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
        assert(message_passing_state_ == message_passing_state::after_forward_pass);
        double lb = constant_;

        // TODO: works only for non-split BDDs
        for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
        {
            const auto [first,last] = bdd_index_range(bdd_nr, nr_variables(bdd_nr)-1);
            value_type bdd_lb = std::numeric_limits<value_type>::infinity();
            for(size_t idx=first; idx<last; ++idx)
            {
                const auto mm = bdd_branch_nodes_[idx].min_marginals();
                bdd_lb = std::min({bdd_lb, mm[0], mm[1]});
            }
            lb += bdd_lb;
        }

        return lb;
    }

    template<typename BDD_BRANCH_NODE>
    typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd()
    {
        if(message_passing_state_ == message_passing_state::after_backward_pass)
        {
            return lower_bound_per_bdd_after_backward_pass();
        }
        else if(message_passing_state_ == message_passing_state::after_forward_pass)
        {
            return lower_bound_per_bdd_after_forward_pass();
        }
        else
        {
            assert(message_passing_state_ == message_passing_state::none);
            backward_run();
            return lower_bound_per_bdd_after_backward_pass();
        }
    }

    // TODO: possibly implement a template function that takes a lambda and can compute both the lower bound and the lower bound per bdd
    template<typename BDD_BRANCH_NODE>
    typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type
bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd_after_forward_pass() { assert(message_passing_state_ == message_passing_state::after_forward_pass); vector_type lbs(nr_bdds()); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { const auto [first,last] = bdd_index_range(bdd_nr, nr_variables(bdd_nr)-1); value_type bdd_lb = std::numeric_limits<value_type>::infinity(); for(size_t idx=first; idx<last; ++idx) { const auto mm = bdd_branch_nodes_[idx].min_marginals(); bdd_lb = std::min({bdd_lb, mm[0], mm[1]}); } lbs[bdd_nr] = bdd_lb; } return lbs; } template<typename BDD_BRANCH_NODE> typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd_after_backward_pass() { assert(message_passing_state_ == message_passing_state::after_backward_pass); vector_type lbs(nr_bdds()); // TODO: works only for non-split BDDs for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { const auto [first,last] = bdd_index_range(bdd_nr, 0); assert(first+1 == last); lbs[bdd_nr] = bdd_branch_nodes_[first].m; } return lbs; } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::forward_run() { if(message_passing_state_ == message_passing_state::after_forward_pass) return; message_passing_state_ = message_passing_state::none; #pragma omp parallel for schedule(static,512) for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { // TODO: This only works for non-split BDDs with exactly one root node { const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr,0); assert(first_bdd_node + 1 == last_bdd_node); bdd_branch_nodes_[first_bdd_node].m = 0.0; } const auto [first_bdd_node, last_bdd_node] = bdd_range(bdd_nr); for(size_t i=first_bdd_node; i<last_bdd_node; ++i) bdd_branch_nodes_[i].prepare_forward_step(); for(size_t i=first_bdd_node; i<last_bdd_node; ++i) bdd_branch_nodes_[i].forward_step(); } message_passing_state_ = message_passing_state::after_forward_pass; } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::backward_run(const size_t bdd_nr) { const auto [first_bdd_node, last_bdd_node] = bdd_range(bdd_nr); for(std::ptrdiff_t i=last_bdd_node-1; i>=std::ptrdiff_t(first_bdd_node); --i) bdd_branch_nodes_[i].backward_step(); } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::backward_run() { MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma backward_run"); if(message_passing_state_ == message_passing_state::after_backward_pass) return; message_passing_state_ = message_passing_state::none; #pragma omp parallel for schedule(static,512) for(std::ptrdiff_t bdd_nr=nr_bdds()-1; bdd_nr>=0; --bdd_nr) backward_run(bdd_nr); message_passing_state_ = message_passing_state::after_backward_pass; } template<typename BDD_BRANCH_NODE> std::array<size_t,2> bdd_sequential_base<BDD_BRANCH_NODE>::bdd_index_range(const size_t bdd_nr, const size_t bdd_idx) const { assert(bdd_nr < nr_bdds()); assert(bdd_idx < nr_variables(bdd_nr)); const size_t first_bdd_node = bdd_variables_(bdd_nr, bdd_idx).offset; const size_t last_bdd_node = bdd_variables_(bdd_nr, bdd_idx+1).offset; assert(first_bdd_node < last_bdd_node); return {first_bdd_node, last_bdd_node}; } template<typename BDD_BRANCH_NODE> std::array<size_t,2> bdd_sequential_base<BDD_BRANCH_NODE>::bdd_range(const size_t bdd_nr) const { assert(bdd_nr < nr_bdds()); const size_t first = bdd_variables_(bdd_nr, 0).offset; const size_t last = bdd_variables_(bdd_nr+1, 0).offset; assert(first < last); return {first, last}; } template<typename BDD_BRANCH_NODE> 
//two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals() two_dim_variable_array<std::array<double,2>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals() { backward_run(); std::vector<size_t> nr_bdd_variables; nr_bdd_variables.reserve(nr_bdds()); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) nr_bdd_variables.push_back(nr_variables(bdd_nr)); two_dim_variable_array<std::array<double,2>> min_margs(nr_bdd_variables); //#pragma omp parallel for schedule(guided,128) for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { // intialize const auto [first,last] = bdd_index_range(bdd_nr, 0); assert(first + 1 == last); bdd_branch_nodes_[first].m = 0.0; for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx) { std::array<value_type,2> mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()}; const auto [first,last] = bdd_index_range(bdd_nr, idx); for(size_t i=first; i<last; ++i) { const std::array<value_type,2> cur_mm = bdd_branch_nodes_[i].min_marginals(); mm[0] = std::min(mm[0], cur_mm[0]); mm[1] = std::min(mm[1], cur_mm[1]); } min_margs(bdd_nr, idx)[0] = mm[0]; min_margs(bdd_nr, idx)[1] = mm[1]; for(size_t i=first; i<last; ++i) bdd_branch_nodes_[i].prepare_forward_step(); for(size_t i=first; i<last; ++i) bdd_branch_nodes_[i].forward_step(); } } message_passing_state_ = message_passing_state::after_forward_pass; return transpose_to_var_order(min_margs); } template<typename BDD_BRANCH_NODE> std::tuple<typename bdd_sequential_base<BDD_BRANCH_NODE>::min_marginal_type, std::vector<char>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals_stacked() { backward_run(); min_marginal_type min_margs(nr_bdd_variables(), 2); std::vector<char> solutions; solutions.reserve(nr_bdd_variables()); //#pragma omp parallel for schedule(guided,128) size_t c = 0; for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { // intialize const auto [first,last] = bdd_index_range(bdd_nr, 0); assert(first + 1 == last); const value_type bdd_lb = bdd_branch_nodes_[first].m; bdd_branch_nodes_[first].m = 0.0; size_t next_node = first; for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx, ++c) { std::array<value_type,2> mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()}; const auto [first,last] = bdd_index_range(bdd_nr, idx); for(size_t i=first; i<last; ++i) { const std::array<value_type,2> cur_mm = bdd_branch_nodes_[i].min_marginals(); mm[0] = std::min(mm[0], cur_mm[0]); mm[1] = std::min(mm[1], cur_mm[1]); // see if active path points to current node; if(next_node == i) { if(cur_mm[0] < cur_mm[1]) { assert(std::abs(bdd_lb - mm[0]) <= 1e-6); solutions.push_back(0); if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_0_offset) { assert(false); // this cannot happen } else if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_1_offset) { // we have arrived at the last variable of the bdd assert(idx+1 == nr_variables(bdd_nr)); } else { next_node = std::distance(&bdd_branch_nodes_[0], bdd_branch_nodes_[i].address(bdd_branch_nodes_[i].offset_low)); } } else { assert(std::abs(bdd_lb - mm[1]) <= 1e-6); solutions.push_back(1); if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_0_offset) { assert(false); // this cannot happen } else if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_1_offset) { // we have arrived at the last variable of the bdd assert(idx+1 == nr_variables(bdd_nr)); } else { next_node = std::distance(&bdd_branch_nodes_[0], 
                                bdd_branch_nodes_[i].address(bdd_branch_nodes_[i].offset_high));
                        }
                    }
                }
            }
            min_margs(int(c),0) = mm[0];
            min_margs(int(c),1) = mm[1];
            for(size_t i=first; i<last; ++i)
                bdd_branch_nodes_[i].prepare_forward_step();
            for(size_t i=first; i<last; ++i)
                bdd_branch_nodes_[i].forward_step();
        }
    }
    message_passing_state_ = message_passing_state::after_forward_pass;
    std::cout << "solutions size " << solutions.size() << "\n";
    return {min_margs, solutions};
    }

    template<typename BDD_BRANCH_NODE>
    typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::get_costs()
    {
        vector_type costs(nr_bdd_variables());

        size_t c = 0;
        for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
        {
            for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx)
            {
                const auto [first,last] = bdd_index_range(bdd_nr, idx);
                for(size_t i=first; i<last; ++i)
                {
                    const auto& bdd = bdd_branch_nodes_[i];
                    if(bdd.offset_low != BDD_BRANCH_NODE::terminal_0_offset)
                        assert(bdd.low_cost == 0.0);
                    if(bdd.offset_high != BDD_BRANCH_NODE::terminal_0_offset)
                    {
                        costs[c++] = bdd.high_cost;
                        break;
                    }
                }
            }
        }

        assert(costs.size() == nr_bdd_variables());
        return costs;
    }

    template<typename BDD_BRANCH_NODE>
    template<typename COST_ITERATOR>
    void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(COST_ITERATOR cost_lo_begin, COST_ITERATOR cost_lo_end, COST_ITERATOR cost_hi_begin, COST_ITERATOR cost_hi_end)
    {
        message_passing_state_ = message_passing_state::none;
        lower_bound_state_ = lower_bound_state::invalid;

        auto get_lo_cost = [&](const size_t var) {
            if(var < std::distance(cost_lo_begin, cost_lo_end) && var < nr_variables())
                return *(cost_lo_begin+var)/double(nr_bdds(var));
            else
                return 0.0;
        };
        auto get_hi_cost = [&](const size_t var) {
            if(var < std::distance(cost_hi_begin, cost_hi_end) && var < nr_variables())
                return *(cost_hi_begin+var)/double(nr_bdds(var));
            else
                return 0.0;
        };

        //#pragma omp parallel for schedule(guided,128)
        for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
        {
            for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
            {
                const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx);
                const size_t var = variable(bdd_nr, bdd_idx);
                const double lo_cost = get_lo_cost(var);
                assert(std::isfinite(lo_cost));
                const double hi_cost = get_hi_cost(var);
                assert(std::isfinite(hi_cost));
                for(size_t i=first_node; i<last_node; ++i)
                {
                    if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_0_offset)
                        assert(bdd_branch_nodes_[i].low_cost == std::numeric_limits<decltype(bdd_branch_nodes_[i].low_cost)>::infinity());
                    if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_0_offset)
                        assert(bdd_branch_nodes_[i].high_cost == std::numeric_limits<decltype(bdd_branch_nodes_[i].high_cost)>::infinity());
                    if(bdd_branch_nodes_[i].offset_low != BDD_BRANCH_NODE::terminal_0_offset)
                        bdd_branch_nodes_[i].low_cost += lo_cost;
                    if(bdd_branch_nodes_[i].offset_high != BDD_BRANCH_NODE::terminal_0_offset)
                        bdd_branch_nodes_[i].high_cost += hi_cost;
                }
            }
        }

        // go over all cost entries and add them to the constant if they are not in any BDD.
for(size_t i=0; i<std::max(std::distance(cost_lo_begin, cost_lo_end), std::distance(cost_hi_begin, cost_hi_end)); ++i) { if(i >= nr_variables() || nr_bdds(i) == 0) { const double lo_cost = get_lo_cost(i); const double hi_cost = get_hi_cost(i); constant_ += std::min(lo_cost, hi_cost); } } } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>>& delta) { message_passing_state_ = message_passing_state::none; assert(delta.size() == nr_bdds()); const auto delta_t = transpose_to_bdd_order(delta); #pragma omp parallel for schedule(static,512) for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx); for(size_t i=first_node; i<last_node; ++i) { bdd_branch_nodes_[i].low_cost += delta_t(bdd_nr, bdd_idx)[0]; bdd_branch_nodes_[i].high_cost += delta_t(bdd_nr, bdd_idx)[1]; } } } } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const min_marginal_type& delta) { message_passing_state_ = message_passing_state::none; assert(delta.rows() == nr_bdd_variables()); assert(delta.cols() == 2); //#pragma omp parallel for schedule(guided,128) size_t c = 0; for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c) { const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx); for(size_t i=first_node; i<last_node; ++i) { bdd_branch_nodes_[i].low_cost += delta(c, 0); bdd_branch_nodes_[i].high_cost += delta(c, 1); } } } } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const vector_type& delta) { message_passing_state_ = message_passing_state::none; assert(delta.rows() == nr_bdd_variables()); assert(delta.cols() == 1); //#pragma omp parallel for schedule(guided,128) size_t c = 0; for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c) { const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx); for(size_t i=first_node; i<last_node; ++i) { bdd_branch_nodes_[i].high_cost += delta(c, 0); } } } } template<typename BDD_BRANCH_NODE> template<typename ITERATOR> void bdd_sequential_base<BDD_BRANCH_NODE>::fix_variables(ITERATOR zero_fixations_begin, ITERATOR zero_fixations_end, ITERATOR one_fixations_begin, ITERATOR one_fixations_end) { // TODO: check for variables that are not covered by any BDD. 
They might change the constant_ std::unordered_set<size_t> zero_fixations(zero_fixations_begin, zero_fixations_end); std::unordered_set<size_t> one_fixations(one_fixations_begin, one_fixations_end); assert(zero_fixations.size() + one_fixations.size() > 0); for(const size_t v : zero_fixations) assert(nr_bdds(v) > 0); for(const size_t v : one_fixations) assert(nr_bdds(v) > 0); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const size_t var = variable(bdd_nr, bdd_idx); const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx); assert(!(zero_fixations.count(var) > 0 && one_fixations.count(var) > 0)); if(zero_fixations.count(var) > 0) for(size_t i=first_bdd_node; i<last_bdd_node; ++i) bdd_branch_nodes_[i].high_cost = std::numeric_limits<value_type>::infinity(); if(one_fixations.count(var) > 0) for(size_t i=first_bdd_node; i<last_bdd_node; ++i) bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity(); } } } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::diffusion_step(const two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>>& min_margs, const value_type damping_step) { throw std::runtime_error("not correct yet"); message_passing_state_ = message_passing_state::none; assert(min_margs.size() == nr_bdds()); assert(damping_step >= 0.0 && damping_step <= 1.0); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { assert(min_margs.size(bdd_nr) == nr_bdd_variables(bdd_nr)); for(size_t bdd_idx=0; bdd_idx<nr_bdd_variables(bdd_nr); ++bdd_idx) { const size_t var = variable(bdd_nr, bdd_idx); const value_type denom = 1.0 / (nr_bdds(var)-1); const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx); for(size_t i=first_bdd_node; i<last_bdd_node; ++i) { bdd_branch_nodes_[i].low_cost -= min_margs(bdd_nr, bdd_idx)[0]; bdd_branch_nodes_[i].high_cost -= min_margs(bdd_nr, bdd_idx)[1]; } } } } template<typename REAL> void atomic_add(REAL& f, const REAL d) { if(d == 0) return; // TODO: use std::atomic_ref when available in C++20 Foo::atomic_ref<REAL> f_ref{f}; f_ref += d; } template<typename REAL> void atomic_store(REAL& f, const REAL d) { Foo::atomic_ref<REAL> f_ref{f}; f_ref.store(d); } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::forward_mm( const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute) { assert(mms_to_collect.size() == nr_variables()); assert(mms_to_distribute.size() == nr_variables()); assert(omega > 0.0 && omega <= 1.0); assert(bdd_nr < nr_bdds()); { const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, 0); assert(first_bdd_node + 1 == last_bdd_node); bdd_branch_nodes_[first_bdd_node].m = 0.0; } for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx); const size_t var = variable(bdd_nr, bdd_idx); std::array<value_type,2> cur_mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()}; for(size_t i=first_bdd_node; i<last_bdd_node; ++i) { const auto bdd_mm = bdd_branch_nodes_[i].min_marginals(); cur_mm[0] = std::min(bdd_mm[0], cur_mm[0]); cur_mm[1] = std::min(bdd_mm[1], cur_mm[1]); } if(!std::isfinite(cur_mm[0])) atomic_store(mms_to_collect[var][0], 
std::numeric_limits<value_type>::infinity()); if(!std::isfinite(cur_mm[1])) atomic_store(mms_to_collect[var][1], std::numeric_limits<value_type>::infinity()); if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1])) { if(cur_mm[0] < cur_mm[1]) atomic_add(mms_to_collect[var][1], omega*(cur_mm[1] - cur_mm[0])); else atomic_add(mms_to_collect[var][0], omega*(cur_mm[0] - cur_mm[1])); } assert(mms_to_collect[var][0] >= 0.0); assert(mms_to_collect[var][1] >= 0.0); for(size_t i=first_bdd_node; i<last_bdd_node; ++i) { if(!std::isfinite(cur_mm[0])) bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity(); if(!std::isfinite(cur_mm[1])) bdd_branch_nodes_[i].high_cost = std::numeric_limits<value_type>::infinity(); if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1])) { //bdd_branch_nodes_[i].low_cost += std::min(omega*(cur_mm[1] - cur_mm[0]), value_type(0.0)); //bdd_branch_nodes_[i].high_cost += std::min(omega*(cur_mm[0] - cur_mm[1]), value_type(0.0)); if(cur_mm[0] < cur_mm[1]) bdd_branch_nodes_[i].high_cost += omega*(cur_mm[0] - cur_mm[1]); else bdd_branch_nodes_[i].low_cost += omega*(cur_mm[1] - cur_mm[0]); } } if(bdd_idx+1<nr_variables(bdd_nr)) { const auto [next_first_bdd_node, next_last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx+1); for(size_t i=next_first_bdd_node; i<next_last_bdd_node; ++i) bdd_branch_nodes_[i].m = std::numeric_limits<value_type>::infinity(); } for(size_t i=first_bdd_node; i<last_bdd_node; ++i) { bdd_branch_nodes_[i].low_cost += mms_to_distribute[var][0]; bdd_branch_nodes_[i].high_cost += mms_to_distribute[var][1]; bdd_branch_nodes_[i].forward_step(); } } } template<typename BDD_BRANCH_NODE> typename BDD_BRANCH_NODE::value_type bdd_sequential_base<BDD_BRANCH_NODE>::backward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute) { assert(mms_to_collect.size() == nr_variables()); assert(mms_to_distribute.size() == nr_variables()); assert(omega > 0.0 && omega <= 1.0); assert(bdd_nr < nr_bdds()); for(std::ptrdiff_t bdd_idx=nr_variables(bdd_nr)-1; bdd_idx>=0; --bdd_idx) { const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx); const size_t var = variable(bdd_nr, bdd_idx); std::array<value_type,2> cur_mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()}; for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i) { const auto bdd_mm = bdd_branch_nodes_[i].min_marginals(); cur_mm[0] = std::min(bdd_mm[0], cur_mm[0]); cur_mm[1] = std::min(bdd_mm[1], cur_mm[1]); } if(!std::isfinite(cur_mm[0])) atomic_store(mms_to_collect[var][0], std::numeric_limits<value_type>::infinity()); if(!std::isfinite(cur_mm[1])) atomic_store(mms_to_collect[var][1], std::numeric_limits<value_type>::infinity()); if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1])) { if(cur_mm[0] < cur_mm[1]) atomic_add(mms_to_collect[var][1], omega*(cur_mm[1] - cur_mm[0])); else atomic_add(mms_to_collect[var][0], omega*(cur_mm[0] - cur_mm[1])); } assert(mms_to_collect[var][0] >= 0.0); assert(mms_to_collect[var][1] >= 0.0); for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i) { if(!std::isfinite(cur_mm[0])) bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity(); if(!std::isfinite(cur_mm[1])) bdd_branch_nodes_[i].high_cost = 
std::numeric_limits<value_type>::infinity(); if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1])) { //bdd_branch_nodes_[i].low_cost += std::min(omega*(cur_mm[1] - cur_mm[0]), value_type(0.0)); //bdd_branch_nodes_[i].high_cost += std::min(omega*(cur_mm[0] - cur_mm[1]), value_type(0.0)); if(cur_mm[0] < cur_mm[1]) bdd_branch_nodes_[i].high_cost += omega*(cur_mm[0] - cur_mm[1]); else bdd_branch_nodes_[i].low_cost += omega*(cur_mm[1] - cur_mm[0]); } } for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i) { bdd_branch_nodes_[i].low_cost += mms_to_distribute[var][0]; bdd_branch_nodes_[i].high_cost += mms_to_distribute[var][1]; bdd_branch_nodes_[i].backward_step(); } } const auto [root_bdd_node_begin, root_bdd_node_end] = bdd_index_range(bdd_nr, 0); assert(root_bdd_node_begin+1 == root_bdd_node_end); return bdd_branch_nodes_[root_bdd_node_begin].m; } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::parallel_mma() { backward_run(); auto reset_mms = [&](std::vector<std::array<value_type,2>>& mms) { assert(mms.size() == nr_variables()); std::fill(mms.begin(), mms.end(), std::array<value_type,2>{0.0,0.0}); }; auto init_mms = [&](std::vector<std::array<value_type,2>>& mms) { if(mms.size() != nr_variables()) { assert(mms.size() == 0.0); mms = std::vector<std::array<value_type,2>>(nr_variables(), {0.0,0.0}); } }; auto average_mms = [&](std::vector<std::array<value_type,2>>& mms) { MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma marginal averaging"); #pragma omp parallel for for(size_t var=0; var<nr_variables(); ++var) { assert(nr_bdds(var) > 0); mms[var][0] /= value_type(nr_bdds(var)); mms[var][1] /= value_type(nr_bdds(var)); } }; init_mms(mms_to_collect_); init_mms(mms_to_distribute_); double lb = constant_; { MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma incremental marginal computation"); #pragma omp parallel for schedule(static,256) for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) forward_mm(bdd_nr, 0.5, mms_to_collect_, mms_to_distribute_); average_mms(mms_to_collect_); reset_mms(mms_to_distribute_); std::swap(mms_to_collect_, mms_to_distribute_); #pragma omp parallel for schedule(static,256) reduction(+:lb) for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) lb += backward_mm(bdd_nr, 0.5, mms_to_collect_, mms_to_distribute_); average_mms(mms_to_collect_); reset_mms(mms_to_distribute_); std::swap(mms_to_collect_, mms_to_distribute_); } lower_bound_ = lb; message_passing_state_ = message_passing_state::after_backward_pass; lower_bound_state_ = lower_bound_state::valid; } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::distribute_delta() { message_passing_state_ = message_passing_state::none; lower_bound_state_ = lower_bound_state::invalid; assert(mms_to_distribute_.size() == nr_variables()); #pragma omp parallel for for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx); const size_t var = variable(bdd_nr, bdd_idx); for(size_t i=first_bdd_node; i<last_bdd_node; ++i) { bdd_branch_nodes_[i].low_cost += mms_to_distribute_[var][0]; bdd_branch_nodes_[i].high_cost += mms_to_distribute_[var][1]; } } } const std::array<typename BDD_BRANCH_NODE::value_type,2> zeros = {0.0, 0.0}; std::fill(mms_to_distribute_.begin(), mms_to_distribute_.end(), zeros); } template<typename BDD_BRANCH_NODE> template<typename T> two_dim_variable_array<T> 
bdd_sequential_base<BDD_BRANCH_NODE>::transpose_to_var_order(const two_dim_variable_array<T>& m) const { assert(m.size() == nr_bdds()); std::vector<size_t> counter(nr_variables(), 0); two_dim_variable_array<T> transposed(nr_bdds_per_variable_); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { assert(m.size(bdd_nr) == nr_variables(bdd_nr)); for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const size_t var = variable(bdd_nr, bdd_idx); transposed(var, counter[var]++) = m(bdd_nr, bdd_idx); } } return transposed; } template<typename BDD_BRANCH_NODE> template<typename T> two_dim_variable_array<T> bdd_sequential_base<BDD_BRANCH_NODE>::transpose_to_bdd_order(const two_dim_variable_array<T>& m) const { assert(m.size() == nr_variables()); std::vector<size_t> counter; counter.reserve(nr_bdds()); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) counter.push_back(nr_variables(bdd_nr)); two_dim_variable_array<T> transposed(counter); counter.clear(); counter.resize(nr_variables(),0); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx) { const size_t var = variable(bdd_nr, bdd_idx); transposed(bdd_nr, bdd_idx) = m(var, counter[var]++); } } return transposed; } template<typename BDD_BRANCH_NODE> Eigen::SparseMatrix<typename BDD_BRANCH_NODE::value_type> bdd_sequential_base<BDD_BRANCH_NODE>::Lagrange_constraint_matrix() const { using T = Eigen::Triplet<value_type>; std::vector<T> coefficients; size_t c = 0; for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c) { const size_t var = variable(bdd_nr, bdd_idx); coefficients.push_back(T(var,c,1)); } } Eigen::SparseMatrix<value_type> A(nr_variables(), nr_bdd_variables()); A.setFromTriplets(coefficients.begin(), coefficients.end()); return A; } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(const char* filename) { const std::string f(filename); export_graphviz(f); } template<typename BDD_BRANCH_NODE> void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(const std::string& filename) { const std::string base_filename = std::filesystem::path(filename).replace_extension("").c_str(); for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr) { const std::string dot_file = base_filename + "_" + std::to_string(bdd_nr) + ".dot"; std::fstream f; f.open(dot_file, std::fstream::out | std::ofstream::trunc); export_graphviz(f, bdd_nr); f.close(); const std::string png_file = base_filename + "_" + std::to_string(bdd_nr) + ".png"; const std::string convert_command = "dot -Tpng " + dot_file + " > " + png_file; std::system(convert_command.c_str()); } } template<typename BDD_BRANCH_NODE> template<typename STREAM> void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(STREAM& s, const size_t bdd_nr) { s << "digraph BDD\n"; s << "{\n"; const auto [first,last] = bdd_range(bdd_nr); for(size_t bdd_idx=first; bdd_idx<last; ++bdd_idx) { auto& bdd = bdd_branch_nodes_[bdd_idx]; if(bdd.offset_low != BDD_BRANCH_NODE::terminal_0_offset && bdd.offset_low != BDD_BRANCH_NODE::terminal_1_offset) { s << "\"" << &bdd << "\" -> \"" << bdd.address(bdd.offset_low) << "\" [ style=\"dashed\"];\n";; } else { s << "\"" << &bdd << "\" -> " << " bot [style=\"dashed\"];\n"; } if(bdd.offset_high != BDD_BRANCH_NODE::terminal_0_offset && bdd.offset_high != BDD_BRANCH_NODE::terminal_1_offset) { s << "\"" << &bdd << "\" -> \"" << bdd.address(bdd.offset_high) << "\";\n"; } else { s << "\"" << &bdd << "\" -> " << " top;\n"; } } s 
<< "}\n"; } }
GB_binop__isne_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__isne_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__isne_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc32) // C=scalar+B GB (_bind1st__isne_fc32) // C=scalar+B' GB (_bind1st_tran__isne_fc32) // C=A+scalar GB (_bind2nd__isne_fc32) // C=A'+scalar GB (_bind2nd_tran__isne_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 0 // BinaryOp: cij = GB_FC32_isne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC32 || GxB_NO_ISNE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isne_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_isne (x, aij) ; \ } GrB_Info GB (_bind1st_tran__isne_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_isne (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
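//------------------------------------------------------------------------------
// illustration: scalar semantics of the ISNE operator on single complex
//------------------------------------------------------------------------------

// A minimal standalone sketch, not part of the generated kernel above. The
// "is not equal" (ISNE) operator differs from GrB_NE in that it returns its
// result in the operand type itself, so for GxB_FC32_t the kernel stores
// (1,0) or (0,0) rather than a bool. The helper name below is hypothetical;
// the real definition is the GB_FC32_isne macro from the GraphBLAS math
// headers.

#include <complex.h>

static inline float _Complex fc32_isne_sketch (float _Complex x, float _Complex y)
{
    // two complex numbers differ if their real or imaginary parts differ
    float ne = (float) ((crealf (x) != crealf (y)) || (cimagf (x) != cimagf (y))) ;
    return (CMPLXF (ne, 0.0f)) ;
}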
4.norace1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 4 int main() { int A[N][N][N][N][N][N][N][N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) for (int k = 1; k < N; k++) for (int l = 1; l < N; l++) for (int m = 1; m < N; m++) for (int n = 1; n < N; n++) for (int o = 1; o < N; o++) for (int p = 1; p < N; p++) for (int q = 1; q < N; q++) A[i][j][k][l][m][n][o][p][q] = A[i][j][k][l][m][n][o][p][q]; } // CHECK: Region is Data Race Free. // END
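// For contrast, a hypothetical variant (kept under #if 0 so the FileCheck
// test above is unchanged) that DOES contain a data race: writing B[i] while
// reading B[i - 1] is a loop-carried dependence across the parallelized i
// loop, so iterations executed by different threads conflict on the same
// element and a race checker should flag the region.
#if 0
#include <omp.h>
#define M 1000
int race_example(void) {
  static int B[M];
#pragma omp parallel for
  for (int i = 1; i < M; i++)
    B[i] = B[i - 1] + 1; // neighbouring iterations touch the same element
  return B[M - 1];
}
#endif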
dem_structures_coupling_utilities.h
/* * Author: Miguel Angel Celigueta * * maceli@cimne.upc.edu */ #ifndef KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H #define KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H // /* External includes */ // System includes // Project includes #include "includes/variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "custom_conditions/RigidFace.h" #include "custom_conditions/RigidEdge.h" #include "DEM_application_variables.h" #include "dem_structures_coupling_application_variables.h" #include "custom_elements/spheric_continuum_particle.h" namespace Kratos { class DemStructuresCouplingUtilities { public: typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType; KRATOS_CLASS_POINTER_DEFINITION(DemStructuresCouplingUtilities); /// Default constructor DemStructuresCouplingUtilities(){} /// Destructor virtual ~DemStructuresCouplingUtilities(){} //*************************************************************************************************************** //*************************************************************************************************************** void TransferStructuresSkinToDem(ModelPart& r_source_model_part, ModelPart& r_destination_model_part, Properties::Pointer props) { // std::string error = CheckProvidedProperties(props); const int dimension = r_source_model_part.GetProcessInfo()[DOMAIN_SIZE]; // if (error != "all_ok") KRATOS_ERROR << "The Dem Walls ModelPart has no valid Properties. Missing " << error << " . Exiting." << std::endl; r_destination_model_part.Conditions().Sort(); int id = 1; if (r_destination_model_part.Conditions().size()) id = (r_destination_model_part.ConditionsEnd()-1)->Id() + 1; ModelPart::ConditionsContainerType& source_conditions = r_source_model_part.Conditions(); // Adding conditions for (unsigned int i = 0; i < source_conditions.size(); i++) { ModelPart::ConditionsContainerType::iterator it = r_source_model_part.ConditionsBegin() + i; Geometry< Node<3> >::Pointer p_geometry = it->pGetGeometry(); Condition::Pointer cond; if (dimension == 2) { cond = Condition::Pointer(new RigidEdge2D(id, p_geometry, props)); } else { cond = Condition::Pointer(new RigidFace3D(id, p_geometry, props)); } cond->Set(DEMFlags::STICKY, true); r_destination_model_part.AddCondition(cond); //TODO: add all of them in a single sentence! AddConditions. Use a temporary PointerVector as a list (not std::vector!). 
id++; } // Adding nodes r_destination_model_part.AddNodes(r_source_model_part.NodesBegin(), r_source_model_part.NodesEnd()); } std::string CheckProvidedProperties(Properties::Pointer props) { std::vector<const Variable<double>* > list_of_variables_double_to_check = {&STATIC_FRICTION, &DYNAMIC_FRICTION, &FRICTION_DECAY, &WALL_COHESION, &SEVERITY_OF_WEAR, &IMPACT_WEAR_SEVERITY, &BRINELL_HARDNESS, &YOUNG_MODULUS, &POISSON_RATIO}; std::vector<const Variable<bool>* > list_of_variables_bool_to_check = {&COMPUTE_WEAR}; for (int i=0; i<(int)list_of_variables_double_to_check.size(); i++) { if(!props->Has(*list_of_variables_double_to_check[i])) return list_of_variables_double_to_check[i]->Name(); } for (int i=0; i<(int)list_of_variables_bool_to_check.size(); i++) { if(!props->Has(*list_of_variables_bool_to_check[i])) return list_of_variables_bool_to_check[i]->Name(); } return "all_ok"; } void SmoothLoadTrasferredToFem(ModelPart& r_model_part, const double portion_of_the_force_which_is_new) { #pragma omp parallel for for (int i=0; i<(int)r_model_part.Nodes().size(); i++) { auto node_it = r_model_part.NodesBegin() + i; array_1d<double, 3> averaged_force; array_1d<double, 3>& node_dem_load = node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD); noalias(averaged_force) = portion_of_the_force_which_is_new * node_dem_load + (1.0 - portion_of_the_force_which_is_new) * node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD, 1); noalias(node_dem_load) = averaged_force; } } void ComputeSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string sand_prod_filename = "sand_production_graph.txt"; static std::ofstream ofs_sand_prod_file; static bool first_time_entered = true; if (first_time_entered) { ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); double current_total_mass_in_grams = 0.0; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; const double particle_density = p_sphere->GetDensity(); const double particle_volume = p_sphere->CalculateVolume(); current_total_mass_in_grams += particle_volume * particle_density * 1.0e3; } static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; //ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin(); //const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * 0.000145; ProcessInfo& r_process_info = dem_model_part.GetProcessInfo(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor; static std::ofstream sand_prod_file("sand_production_graph.txt", std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } void MarkBrokenSpheres(ModelPart& dem_model_part) { ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); for (unsigned int k = 0; k < pElements.size(); k++) { 
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; bool go_to_next_particle = false; for (unsigned int i = 0; i < p_sphere->mContinuumInitialNeighborsSize; i++) { if (!p_sphere->mIniNeighbourFailureId[i]) { go_to_next_particle = true; break; } } if (go_to_next_particle) continue; else p_sphere->Set(ISOLATED, true); } } void ComputeSandProductionWithDepthFirstSearchNonRecursiveImplementation(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string sand_prod_filename = "sand_production_graph_with_chunks_non_recursive.txt"; static std::ofstream ofs_sand_prod_file; const std::string granulometry_distr_filename = "granulometry_distribution.txt"; static std::ofstream ofs_granulometry_distr_file; static bool first_time_entered = true; if (first_time_entered) { ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc); ofs_granulometry_distr_file.open(granulometry_distr_filename, std::ofstream::out | std::ofstream::trunc); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); std::vector<double> chunks_masses; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; it->Set(VISITED, false); } std::vector<SphericContinuumParticle*> stack_of_particles_to_check; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); double this_chunk_mass = 0.0; stack_of_particles_to_check.push_back(p_sphere); while (stack_of_particles_to_check.size()) { SphericContinuumParticle* current_particle = stack_of_particles_to_check.back(); stack_of_particles_to_check.pop_back(); if (current_particle->Is(VISITED)) continue; const double particle_density = current_particle->GetDensity(); const double particle_volume = current_particle->CalculateVolume(); this_chunk_mass += particle_volume * particle_density * 1.0e3; current_particle->Set(VISITED, true); for (size_t i = 0; i < current_particle->mContinuumInitialNeighborsSize; i++) { SphericParticle* p_neighbour_sphere = current_particle->mNeighbourElements[i]; if (p_neighbour_sphere == NULL) continue; if (p_neighbour_sphere->Is(VISITED)) continue; //not necessary, but saves increasing and decreasing stack_of_particles_to_check's size if (current_particle->mIniNeighbourFailureId[i]) continue; auto existing_element_it = dem_model_part.GetMesh(0).Elements().find(p_neighbour_sphere->Id()); if (existing_element_it == dem_model_part.GetMesh(0).ElementsEnd()) continue; SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere); stack_of_particles_to_check.push_back(p_neigh_cont_sphere); } } if (this_chunk_mass) chunks_masses.push_back(this_chunk_mass); } const double max_mass_of_a_single_chunck = *std::max_element(chunks_masses.begin(), chunks_masses.end()); const double current_total_mass_in_grams = max_mass_of_a_single_chunck; static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ProcessInfo& 
r_process_info = dem_model_part.GetProcessInfo(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor; ofs_sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; ofs_sand_prod_file.flush(); unsigned int number_of_time_steps_between_granulometry_prints = 1e9; static unsigned int printing_counter = 0; if (printing_counter == number_of_time_steps_between_granulometry_prints) { ofs_granulometry_distr_file << time; for (unsigned int k = 0; k < chunks_masses.size(); k++) ofs_granulometry_distr_file << " " << chunks_masses[k]; ofs_granulometry_distr_file << '\n'; printing_counter = 0; } printing_counter++; ofs_granulometry_distr_file.flush(); } void ComputeSandProductionWithDepthFirstSearch(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string filename = "sand_production_graph_with_chunks.txt"; std::ifstream ifile(filename.c_str()); static bool first_time_entered = true; if ((bool) ifile && first_time_entered) { std::remove(filename.c_str()); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); std::vector<double> chunks_masses; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; it->Set(VISITED, false); } for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); double this_chunk_mass = 0.0; if( it->IsNot(VISITED) ) { DepthFirstSearchVisit(p_sphere, this_chunk_mass); chunks_masses.push_back(this_chunk_mass); } } const double max_mass_of_a_single_chunck = *std::max_element(chunks_masses.begin(), chunks_masses.end()); const double current_total_mass_in_grams = max_mass_of_a_single_chunck; static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * Pascals_to_psi_factor; static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } void DepthFirstSearchVisit(SphericContinuumParticle* p_sphere, double& this_chunk_mass) { p_sphere->Set(VISITED, true); const double particle_radius = p_sphere->GetRadius(); const double particle_density = p_sphere->GetDensity(); this_chunk_mass += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0; for (size_t i=0; i<p_sphere->mContinuumInitialNeighborsSize; i++) { SphericParticle* p_neighbour_sphere = p_sphere->mNeighbourElements[i]; if (p_neighbour_sphere==NULL) continue; if (p_sphere->mIniNeighbourFailureId[i]) continue; if (p_neighbour_sphere->IsNot(VISITED)) { SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere); DepthFirstSearchVisit(p_neigh_cont_sphere, this_chunk_mass); } } } void 
ComputeTriaxialSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part_1, ModelPart& outer_walls_model_part_2, const double time) { const std::string filename = "sand_production_graph.txt"; std::ifstream ifile(filename.c_str()); static bool first_time_entered = true; if ((bool) ifile && first_time_entered) { std::remove(filename.c_str()); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); double current_total_mass_in_grams = 0.0; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; const double particle_radius = p_sphere->GetRadius(); const double particle_density = p_sphere->GetDensity(); current_total_mass_in_grams += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0; } static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ModelPart::ConditionsContainerType::iterator condition_begin_1 = outer_walls_model_part_1.ConditionsBegin(); ModelPart::ConditionsContainerType::iterator condition_begin_2 = outer_walls_model_part_2.ConditionsBegin(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = (condition_begin_1->GetValue(POSITIVE_FACE_PRESSURE) + condition_begin_2->GetValue(POSITIVE_FACE_PRESSURE) + 3.45e6) * Pascals_to_psi_factor * 0.33333333333333; // 3.45e6 is the sigma_z constant pressure static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } //*************************************************************************************************************** //*************************************************************************************************************** /// Turn back information as a string. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const { } protected: private: /// Assignment operator DemStructuresCouplingUtilities & operator=(DemStructuresCouplingUtilities const& rOther); ///@} }; // Class DemStructuresCouplingUtilities } // namespace Kratos #endif // KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
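// A minimal usage sketch appended for illustration; it is not part of the
// original header. The model parts and the properties pointer are assumed to
// be set up elsewhere (their names here are hypothetical), and wall_props must
// contain every variable checked by CheckProvidedProperties.
namespace Kratos {

inline void ExampleCoupleSkinToDem(ModelPart& r_fem_skin_model_part,
                                   ModelPart& r_dem_walls_model_part,
                                   Properties::Pointer wall_props)
{
    DemStructuresCouplingUtilities coupling_utilities;

    // Abort early if the wall properties are incomplete.
    const std::string check = coupling_utilities.CheckProvidedProperties(wall_props);
    KRATOS_ERROR_IF(check != "all_ok") << "The DEM walls Properties are missing " << check << std::endl;

    // Mirror the FEM skin as DEM rigid faces/edges and share its nodes.
    coupling_utilities.TransferStructuresSkinToDem(r_fem_skin_model_part, r_dem_walls_model_part, wall_props);

    // Blend the DEM surface load carried on the (shared) skin nodes with the
    // previous step's value (50/50 here) to smooth the force signal handed
    // back to the structural solver.
    coupling_utilities.SmoothLoadTrasferredToFem(r_fem_skin_model_part, 0.5);
}

} // namespace Kratos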
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "api/libperf.h" #include "lib/libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <sys/poll.h> #include <locale.h> #if defined (HAVE_MPI) # include <mpi.h> #elif defined (HAVE_RTE) # include<rte.h> #endif #define MAX_BATCH_FILES 32 #define MAX_CPUS 1024 #define TL_RESOURCE_NAME_NONE "<none>" #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCIqM:r:E:T:d:x:A:BUm:" #define TEST_ID_UNDEFINED -1 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; const char *overhead_lat; unsigned window_size; } test_type_t; typedef struct perftest_params { ucx_perf_params_t super; int test_id; } perftest_params_t; struct perftest_context { perftest_params_t params; const char *server_addr; int port; int mpi; unsigned num_cpus; unsigned cpus[MAX_CPUS]; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", 
UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {"ucp_am_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "am latency", "latency", 1}, {"ucp_am_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "am bandwidth / message rate", "overhead", 32}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void (*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv"); } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final, int is_server, int is_multi_thread) { static const char *fmt_csv; static const char *fmt_numeric; static const char *fmt_plain; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } #if _OPENMP if (!final) { printf("[thread %d]", omp_get_thread_num()); } else if (flags & TEST_FLAG_PRINT_RESULTS) { printf("Final: "); } #endif if (is_multi_thread && final) { fmt_csv = "%4.0f,%.3f,%.2f,%.0f\n"; fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n"; fmt_plain = "%18.0f %29.3f %22.2f %23.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? 
fmt_numeric : fmt_plain, (double)result->iters, result->latency.total_average * 1000000.0, result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.total_average); } else { fmt_csv = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; fmt_numeric = "%'18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n"; fmt_plain = "%18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); } fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *overhead_lat_str; const char *test_data_str; const char *test_api_str; test_type_t *test; unsigned i; test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? NULL : &tests[ctx->params.test_id]; if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.super.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_SHORT_IOV: test_data_str = "short iov"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]); printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super)); if ((test->api == UCX_PERF_API_UCP) && (test->command == UCX_PERF_CMD_AM)) { printf("| AM header size: %-60zu |\n", ctx->params.super.ucp.am_hdr_size); } } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", ucs_basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { overhead_lat_str = (test == NULL) ? 
"overhead" : test->overhead_lat; printf("+--------------+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| Stage | # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void print_memory_type_usage(void) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if (ucx_perf_mem_type_allocators[it] != NULL) { printf(" %s - %s\n", ucs_memory_type_names[it], ucs_memory_type_descs[it]); } } } static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%"PRIu64")\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%"PRIu64")\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the 
rest of\n"); printf(" the line is command-line arguments for the test.\n"); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted output\n"); printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" shortiov - short io-vector messages (only for active messages)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" iov - scatter-gather list (iovec)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu), included in message size\n", ctx->params.super.uct.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf(" -I create context with wakeup feature enabled\n"); printf(" -E <mode> wait mode for tests\n"); printf(" poll : repeatedly call worker_progress\n"); printf(" sleep : go to sleep after posting requests\n"); printf(" -H <size> active message header size (%zu), not included in message size\n", ctx->params.super.ucp.am_hdr_size); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(opt_arg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_mem_type(const char *opt_arg, ucs_memory_type_t *mem_type) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if(!strcmp(opt_arg, ucs_memory_type_names[it]) && (ucx_perf_mem_type_allocators[it] != NULL)) { *mem_type = it; return UCS_OK; } } 
ucs_error("Unsupported memory type: \"%s\"", opt_arg); return UCS_ERR_INVALID_PARAM; } static ucs_status_t parse_mem_type_params(const char *opt_arg, ucs_memory_type_t *send_mem_type, ucs_memory_type_t *recv_mem_type) { const char *delim = ","; char *token = strtok((char*)opt_arg, delim); if (UCS_OK != parse_mem_type(token, send_mem_type)) { return UCS_ERR_INVALID_PARAM; } token = strtok(NULL, delim); if (NULL == token) { *recv_mem_type = *send_mem_type; return UCS_OK; } else { return parse_mem_type(token, recv_mem_type); } } static ucs_status_t parse_message_sizes_params(const char *opt_arg, ucx_perf_params_t *params) { const char delim = ','; size_t *msg_size_list, token_num, token_it; char *optarg_ptr, *optarg_ptr2; optarg_ptr = (char *)opt_arg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; msg_size_list = realloc(params->msg_size_list, sizeof(*params->msg_size_list) * token_num); if (NULL == msg_size_list) { return UCS_ERR_NO_MEMORY; } params->msg_size_list = msg_size_list; optarg_ptr = (char *)opt_arg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.uct.am_hdr_size = 8; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.am_hdr_size = 0; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t *params, char opt, const char *opt_arg) { char *optarg2 = NULL; test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return 
UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "shortiov")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT_IOV; } else if (!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'E': if (!strcmp(opt_arg, "poll")) { params->super.wait_mode = UCX_PERF_WAIT_MODE_POLL; return UCS_OK; } else if (!strcmp(opt_arg, "sleep")) { params->super.wait_mode = UCX_PERF_WAIT_MODE_SLEEP; return UCS_OK; } else { ucs_error("Invalid option argument for -E"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.uct.am_hdr_size = atol(opt_arg); params->super.ucp.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'I': params->super.flags |= UCX_PERF_TEST_FLAG_WAKEUP; return UCS_OK; case 'M': if (!strcmp(opt_arg, "single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return 
UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", error_prefix); return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { params->super.max_outstanding = test->window_size; } return UCS_OK; } static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name, int *line_num, perftest_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; char error_prefix[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } ++(*line_num); argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); ucs_snprintf_safe(error_prefix, sizeof(error_prefix), "in batch file '%s' line %d: ", file_name, *line_num); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("%s-%c %s: %s", error_prefix, c, optarg, ucs_status_string(status)); return status; } } status = adjust_test_params(params, error_prefix); if (status != UCS_OK) { return status; } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx) { char *endptr, *cpu_list = opt_arg; int cpu; ctx->num_cpus = 0; cpu = strtol(cpu_list, &endptr, 10); while (((*endptr == ',') || (*endptr == '\0')) && (ctx->num_cpus < MAX_CPUS)) { if (cpu < 0) { ucs_error("invalid cpu number detected: (%d)", cpu); return UCS_ERR_INVALID_PARAM; } ctx->cpus[ctx->num_cpus++] = cpu; if (*endptr == '\0') { break; } cpu_list = endptr + 1; /* skip the comma */ cpu = strtol(cpu_list, &endptr, 10); } if (*endptr == ',') { ucs_error("number of listed cpus exceeds the maximum supported value (%d)", MAX_CPUS); return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); ucx_perf_global_init(); /* initialize memory types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if 
(status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = 
UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
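 * The exchange is a two-phase ping/pong: rank 0 posts a zero-byte
 * MPI_Irecv ("ping", tag 1) from every other rank, while each non-root
 * rank sends its ping and posts an MPI_Irecv for the "pong" (tag 2);
 * all ranks then spin on MPI_Testall so the caller-supplied progress()
 * callback keeps running, and once every ping has arrived rank 0
 * answers each rank with a pong.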
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, 
size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; ucs_trace_func(""); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ucs_trace_func(""); ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return 
UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->super.api == UCX_PERF_API_UCP) { if (strcmp(parent_params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.dev_name); } if (strcmp(parent_params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? "command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). 
*/ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
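/*
 * Illustrative sketch, not part of the original ucx_perftest source: it
 * shows the size-prefixed exchange used by sock_rte_post_vec() and
 * sock_rte_recv() above, where the sender writes the total payload size
 * first so the receiver can validate it before reading the data. The
 * demo_* name and the use of a local socketpair are assumptions made for
 * this self-contained example only.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void demo_size_prefixed_exchange(void)
{
    int sv[2];
    const char payload[] = "ping";
    char buffer[16];
    size_t size;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) {
        perror("socketpair");
        return;
    }

    /* sender side: size header first, then the payload itself */
    size = sizeof(payload);
    (void)write(sv[0], &size, sizeof(size));
    (void)write(sv[0], payload, size);

    /* receiver side: read the header, check it fits, then read the data */
    (void)read(sv[1], &size, sizeof(size));
    if (size <= sizeof(buffer)) {
        (void)read(sv[1], buffer, size);
        printf("received %zu bytes: %s\n", size, buffer);
    }

    close(sv[0]);
    close(sv[1]);
}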
GB_binop__pow_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_01__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc32) // C=scalar+B GB (_bind1st__pow_fc32) // C=scalar+B' GB (_bind1st_tran__pow_fc32) // C=A+scalar GB (_bind2nd__pow_fc32) // C=A'+scalar GB (_bind2nd_tran__pow_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_cpowf (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_cpowf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC32 || GxB_NO_POW_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
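// POW is not one of those operators (see "C+=A+B function (dense ewise3):
// GB ((none))" in the table above), so no dense ewise3-accumulate kernel
// is generated for it and this stub stays compiled out.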
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__pow_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__pow_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_cpowf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_cpowf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpowf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, 
const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpowf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
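/*
 * Illustrative sketch, not part of the generated GraphBLAS source: the
 * bind1st/bind2nd kernels above reduce to a single flat OpenMP loop that
 * applies z = f(aij, scalar) to every stored entry, skipping positions
 * that the bitmap Ab marks as empty.  demo_bind2nd_powf and its real
 * (float) types are assumptions for this example; the generated kernel
 * operates on GxB_FC32_t with GB_cpowf instead.
 */
#include <math.h>
#include <stdint.h>

static void demo_bind2nd_powf
(
    float *Cx,              // output array, anz entries
    const float *Ax,        // input array, anz entries
    const int8_t *Ab,       // bitmap, or NULL if the matrix is full
    int64_t anz,
    float y,                // the bound second operand
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Ab == NULL means the matrix is full: every entry is present
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = powf (Ax [p], y) ;     // cij = pow (aij, y)
    }
}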
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p*inch*9; const float* k1 = kernel + (p+1)*inch*9; for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr0n = outptr0 + outw; float* outptr1n = outptr1 + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0+3); float32x4_t _k06 = vld1q_f32(k0+6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1+3); float32x4_t _k16 = vld1q_f32(k1+6); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _sum0n = vld1q_f32(outptr0n); float32x4_t _sum1n = vld1q_f32(outptr1n); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = 
vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r10, _k00, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r11, _k00, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r12, _k00, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r20, _k03, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r21, _k03, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r22, _k03, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r30, _k06, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r31, _k06, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r32, _k06, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r10, _k10, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r11, _k10, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r12, _k10, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r20, _k13, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r21, _k13, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r22, _k13, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r30, _k16, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r31, _k16, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r32, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); vst1q_f32(outptr0n, _sum0n); vst1q_f32(outptr1n, _sum1n); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr0 += 4; outptr1 += 4; outptr0n += 4; outptr1n += 4; } #else if (nn > 0) { asm volatile( "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "pld [%8, #192] \n" "vld1.f32 {d28-d31}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q14, q15, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :64] \n"// _sum1 "vmla.f32 q6, q8, %e18[0] \n" "vmla.f32 q7, q8, %e21[0] \n" "pld [%3, #128] \n" "vld1.f32 {d24-d25}, [%3] \n"// _sum0n "pld [%4, #128] \n" "vld1.f32 {d26-d27}, [%4] \n"// _sum1n "vmla.f32 q12, q14, %e20[0] \n" "vmla.f32 q13, q14, %e23[0] \n" "vext.32 q8, q8, q9, #2 \n" "vext.32 q9, q14, q15, #1 \n" "vmla.f32 q6, q10, %e18[1] \n" "vmla.f32 q7, q10, %e21[1] \n" "vmla.f32 q12, q11, %f20[0] \n" "vmla.f32 q13, q11, %f23[0] \n" "pld [%6, #192] \n" "vld1.f32 {d28-d31}, [%6] \n"// r1 "add %6, #16 \n" "vmla.f32 q6, q8, %f18[0] \n" "vmla.f32 q7, q8, %f21[0] \n" "vmla.f32 q12, q9, %e20[1] \n" "vmla.f32 q13, q9, %e23[1] \n" "vext.32 q10, q14, q15, #1 \n" "vmla.f32 q6, q14, %e19[0] \n" "vmla.f32 q7, q14, %e22[0] \n" "vmla.f32 q12, q14, %e18[0] \n" "vmla.f32 q13, q14, %e21[0] \n" "vext.32 q11, q14, q15, #2 \n" "vmla.f32 q6, q10, %e19[1] \n" "vmla.f32 q7, q10, %e22[1] \n" "vmla.f32 q12, q10, %e18[1] \n" "vmla.f32 q13, q10, %e21[1] \n" "pld [%7, #192] \n" "vld1.f32 {d16-d18}, [%7 :64] \n"// r2 "add %7, #16 \n" "vmla.f32 q6, q11, %f19[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q12, q11, %f18[0] \n" "vmla.f32 q13, q11, %f21[0] \n" "vext.32 q10, q8, q9, #1 \n" "vmla.f32 q6, q8, %e20[0] \n" "vmla.f32 q7, q8, %e23[0] \n" "vmla.f32 q12, q8, %e19[0] \n" "vmla.f32 q13, q8, %e22[0] \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e20[1] \n" "vmla.f32 q7, q10, %e23[1] \n" "vmla.f32 q12, q10, %e19[1] \n" "vmla.f32 q13, q10, %e22[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "vmla.f32 q6, q11, %f20[0] \n" "vmla.f32 q7, q11, %f23[0] \n" "vmla.f32 q12, q11, %f19[0] \n" "vmla.f32 q13, q11, %f22[0] \n" "pld [%8, #192] \n" "vld1.f32 {d28-d31}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vst1.f32 {d12-d13}, [%1 
: 64]!\n" "vst1.f32 {d14-d15}, [%2 : 64]!\n" "vext.32 q11, q14, q15, #2 \n" "vst1.f32 {d24-d25}, [%3]! \n" "vst1.f32 {d26-d27}, [%4]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %5, #16 \n" "sub %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); float32x4_t _sum0n = vmulq_f32(_r10, _k00); float32x4_t _sum1n = vmulq_f32(_r10, _k10); _sum0n = vmlaq_f32(_sum0n, _r20, _k03); _sum1n = vmlaq_f32(_sum1n, _r20, _k13); _sum0n = vmlaq_f32(_sum0n, _r30, _k06); _sum1n = vmlaq_f32(_sum1n, _r30, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3); _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); *outptr0n = vaddvq_f32(_sum0n); *outptr1n = vaddvq_f32(_sum1n); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n)); float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); *outptr0n = vget_lane_f32(_ss01n, 0); *outptr1n = vget_lane_f32(_ss01n, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum0n = 0.f; float sum1 = 0.f; float sum1n = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; sum0n += r1[0] * k0[0]; sum0n += r1[1] * k0[1]; sum0n += r1[2] * k0[2]; sum0n += r2[0] * k0[3]; sum0n += r2[1] * k0[4]; sum0n += r2[2] * k0[5]; sum0n += r3[0] * k0[6]; sum0n += r3[1] * k0[7]; sum0n += r3[2] * k0[8]; sum1n += r1[0] * k1[0]; sum1n += r1[1] * k1[1]; sum1n += r1[2] * k1[2]; sum1n += r2[0] * k1[3]; sum1n += r2[1] * k1[4]; sum1n += r2[2] * k1[5]; sum1n += r3[0] * k1[6]; sum1n += r3[1] * k1[7]; sum1n += r3[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; *outptr0n += sum0n; *outptr1n += sum1n; #endif // __ARM_NEON r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr0n++; outptr1n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; outptr0n += outw; outptr1n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw 
& 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; outptr1 += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n"// r0 "add %3, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n"// _sum1 "vmul.f32 q14, q8, %e12[0] \n" "vmul.f32 q15, q8, %e15[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e12[1] \n" "vmla.f32 q7, q10, %e15[1] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q14, q11, %f12[0] \n" "vmla.f32 q15, q11, %f15[0] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q14, q10, %e13[1] \n" "vmla.f32 q15, q10, %e16[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5] \n"// r2 "add %5, #16 \n" "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "vmla.f32 q14, q8, %e14[0] \n" "vmla.f32 q15, q8, %e17[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e14[1] \n" "vmla.f32 q7, q10, %e17[1] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmla.f32 q15, q11, %f17[0] \n" "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vst1.f32 {d12-d13}, [%1]! \n" "vst1.f32 {d14-d15}, [%2]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0++; r1++; r2++; outptr0++; outptr1++; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9; k1 += 9; } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p*inch*9; for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k3456 = vld1q_f32(kernel0+3); float32x4_t _k6789 = vld1q_f32(kernel0+6); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _sum3 = vld1q_f32(outptr2); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r10, _k0123, 0); float32x4_t _sum4 = vmulq_laneq_f32(_r11, _k0123, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k0123, 2); _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k3456, 0); _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k3456, 1); _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k3456, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k6789, 0); _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k6789, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k6789, 2); _sum1 = vaddq_f32(_sum1, _sum2); _sum3 = vaddq_f32(_sum3, _sum4); vst1q_f32(outptr, _sum1); vst1q_f32(outptr2, _sum3); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1 :64] \n"// _sum "vmla.f32 q7, q9, %e14[0] \n" "vmul.f32 q6, q11, %e14[1] \n" "vmul.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n"// _sum2 "vmla.f32 q8, q9, %e14[0] \n" "vmul.f32 q14, q11, %e14[1] \n" "vmul.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n"// r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" 
"vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n"// r3 "add %6, #16 \n" "vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); float32x4_t _sum2 = vmulq_f32(_r10, _k0123); _sum2 = vmlaq_f32(_sum2, _r20, _k3456); _sum2 = vmlaq_f32(_sum2, _r30, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, 
                _r20, _k6789, 0);
                _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1);
                _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2);

                _sum1 = vaddq_f32(_sum1, _sum2);

                vst1q_f32(outptr, _sum1);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr += 4;
            }
#else
            if (nn > 0)
            {
            asm volatile(
                "pld        [%2, #192]          \n"
                "vld1.f32   {d16-d18}, [%2]     \n"// r0
                "add        %2, #16             \n"
                "vext.32    q10, q8, q9, #1     \n"
                "vext.32    q11, q8, q9, #2     \n"
                "0:                             \n"
                "pld        [%1, #128]          \n"
                "vld1.f32   {d14-d15}, [%1]     \n"// _sum
                "vmla.f32   q7, q8, %e10[0]     \n"
                "vmul.f32   q13, q10, %e10[1]   \n"
                "vmul.f32   q14, q11, %f10[0]   \n"
                "pld        [%3, #192]          \n"
                "vld1.f32   {d16-d18}, [%3]     \n"// r1
                "add        %3, #16             \n"
                "vmla.f32   q7, q8, %e11[0]     \n"
                "vext.32    q10, q8, q9, #1     \n"
                "vext.32    q11, q8, q9, #2     \n"
                "vmla.f32   q13, q10, %e11[1]   \n"
                "vmla.f32   q14, q11, %f11[0]   \n"
                "pld        [%4, #192]          \n"
                "vld1.f32   {d16-d18}, [%4]     \n"// r2
                "add        %4, #16             \n"
                "vmla.f32   q7, q8, %e12[0]     \n"
                "vext.32    q10, q8, q9, #1     \n"
                "vext.32    q11, q8, q9, #2     \n"
                "vmla.f32   q13, q10, %e12[1]   \n"
                "vmla.f32   q14, q11, %f12[0]   \n"
                "pld        [%2, #192]          \n"
                "vld1.f32   {d16-d18}, [%2]     \n"// r0
                "add        %2, #16             \n"
                "vadd.f32   q7, q7, q13         \n"
                "vadd.f32   q7, q7, q14         \n"
                "vext.32    q10, q8, q9, #1     \n"
                "vext.32    q11, q8, q9, #2     \n"
                "vst1.f32   {d14-d15}, [%1]!    \n"
                "subs       %0, #1              \n"
                "bne        0b                  \n"
                "sub        %2, #16             \n"
                : "=r"(nn),     // %0
                  "=r"(outptr), // %1
                  "=r"(r0),     // %2
                  "=r"(r1),     // %3
                  "=r"(r2)      // %4
                : "0"(nn),
                  "1"(outptr),
                  "2"(r0),
                  "3"(r1),
                  "4"(r2),
                  "w"(_k0123),  // %10
                  "w"(_k3456),  // %11
                  "w"(_k6789)   // %12
                : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k0123);
                _sum = vmlaq_f32(_sum, _r10, _k3456);
                _sum = vmlaq_f32(_sum, _r20, _k6789);

                _sum = vsetq_lane_f32(*outptr, _sum, 3);

#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = 0;

                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr += sum;
#endif
                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }

        kernel0 += 9;
        }
    }
}

static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*8, inch, outch);

    const float ktm[8][3] = {
        {   1.0f,     0.0f,     0.0f},
        {-2.0f/9,  -2.0f/9,  -2.0f/9},
        {-2.0f/9,   2.0f/9,  -2.0f/9},
        {1.0f/90,  1.0f/45,  2.0f/45},
        {1.0f/90, -1.0f/45,  2.0f/45},
        {1.0f/45,  1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        {   0.0f,     0.0f,     1.0f}
    };

#pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
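
        // note: for each (p, q) pair the two passes above compute the
        // Winograd F(6x6,3x3) kernel transform U = G * g * G^T (stored
        // transposed, as the comment in the loop notes), with ktm playing
        // the role of G. A minimal scalar reference of the same two-step
        // product, as a sketch only -- kernel_transform_ref and its
        // arguments are hypothetical, not part of this file:
        //
        //   static void kernel_transform_ref(const float g[3][3], float u[8][8], const float G[8][3])
        //   {
        //       float t[8][3]; // t = G * g
        //       for (int i = 0; i < 8; i++)
        //           for (int j = 0; j < 3; j++)
        //               t[i][j] = G[i][0]*g[0][j] + G[i][1]*g[1][j] + G[i][2]*g[2][j];
        //       for (int i = 0; i < 8; i++) // u = t * G^T
        //           for (int j = 0; j < 8; j++)
        //               u[i][j] = t[i][0]*G[j][0] + t[i][1]*G[j][1] + t[i][2]*G[j][2];
        //   }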
} } static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm = img0_tm.row(i * w_tm/8 + j); // TODO neon optimize for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; r0_tm += 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; 
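
        // note: in the Winograd domain the convolution reduces to an
        // elementwise multiply-accumulate over the 64 transform coefficients:
        // for every tile t and output channel p,
        //   top_tm[p][t][m] = sum_q bottom_tm[q][t][m] * U[p][q][m],  m = 0..63
        // which is what the blocks below compute, unrolled 4 output channels
        // by 4 input channels at a time, with scalar tails for the leftovers.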
top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch); int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output2_tm = vld1q_f32(output2_tm); float32x4_t _output3_tm = vld1q_f32(output3_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k00 = vld1q_f32(k00); k00 += 64; float32x4_t _k01 = vld1q_f32(k00); k00 += 64; float32x4_t _k02 = vld1q_f32(k00); k00 += 64; float32x4_t _k03 = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k02); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k03); float32x4_t _k10 = vld1q_f32(k10); k10 += 64; float32x4_t _k11 = vld1q_f32(k10); k10 += 64; float32x4_t _k12 = vld1q_f32(k10); k10 += 64; float32x4_t _k13 = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tm = vmlaq_f32(_output1_tm, _r2, _k12); _output1_tm = vmlaq_f32(_output1_tm, _r3, _k13); float32x4_t _k20 = vld1q_f32(k20); k20 += 64; float32x4_t _k21 = vld1q_f32(k20); k20 += 64; float32x4_t _k22 = vld1q_f32(k20); k20 += 64; float32x4_t _k23 = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tm = vmlaq_f32(_output2_tm, _r0, _k20); _output2_tm = vmlaq_f32(_output2_tm, _r1, _k21); _output2_tm = vmlaq_f32(_output2_tm, _r2, _k22); _output2_tm = vmlaq_f32(_output2_tm, _r3, _k23); float32x4_t _k30 = vld1q_f32(k30); k30 += 64; float32x4_t _k31 = vld1q_f32(k30); k30 += 64; float32x4_t _k32 = vld1q_f32(k30); k30 += 64; float32x4_t _k33 = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tm = vmlaq_f32(_output3_tm, _r0, _k30); _output3_tm = vmlaq_f32(_output3_tm, _r1, _k31); _output3_tm = vmlaq_f32(_output3_tm, _r2, _k32); _output3_tm = vmlaq_f32(_output3_tm, _r3, _k33); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output2_tm, _output2_tm); vst1q_f32(output3_tm, _output3_tm); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm); float32x4_t _output2_tmn = vld1q_f32(output2_tm); 
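                        // second 4 lanes of this m-block: the output, input and
                        // kernel pointers were all advanced by 4 just above, so
                        // the loads here and below pick up coefficients m+4 .. m+7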
float32x4_t _output3_tmn = vld1q_f32(output3_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k00n = vld1q_f32(k00); k00 += 64; float32x4_t _k01n = vld1q_f32(k00); k00 += 64; float32x4_t _k02n = vld1q_f32(k00); k00 += 64; float32x4_t _k03n = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n); float32x4_t _k10n = vld1q_f32(k10); k10 += 64; float32x4_t _k11n = vld1q_f32(k10); k10 += 64; float32x4_t _k12n = vld1q_f32(k10); k10 += 64; float32x4_t _k13n = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); _output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n); _output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n); float32x4_t _k20n = vld1q_f32(k20); k20 += 64; float32x4_t _k21n = vld1q_f32(k20); k20 += 64; float32x4_t _k22n = vld1q_f32(k20); k20 += 64; float32x4_t _k23n = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n); _output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n); _output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n); _output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n); float32x4_t _k30n = vld1q_f32(k30); k30 += 64; float32x4_t _k31n = vld1q_f32(k30); k30 += 64; float32x4_t _k32n = vld1q_f32(k30); k30 += 64; float32x4_t _k33n = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n); _output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n); _output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n); _output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n); vst1q_f32(output0_tm, _output0_tmn); vst1q_f32(output1_tm, _output1_tmn); vst1q_f32(output2_tm, _output2_tmn); vst1q_f32(output3_tm, _output3_tmn); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; } #else // __aarch64__ asm volatile( "mov r4, #8 \n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "0: \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00 "add %8, %8, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10 "add %9, %9, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! 
\n"//q2 q3 = _r1 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01 "add %8, %8, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11 "add %9, %9, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02 "add %8, %8, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12 "add %9, %9, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03 "sub %8, %8, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13 "sub %9, %9, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%0 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20 "add %10, %10, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "vst1.f32 {d24-d27}, [%1 :128]!\n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30 "add %11, %11, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21 "add %10, %10, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31 "add %11, %11, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22 "add %10, %10, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32 "add %11, %11, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23 "sub %10, %10, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33 "sub %11, %11, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%2 :128]!\n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "subs r4, r4, #1 \n" "vst1.f32 {d24-d27}, [%3 :128]!\n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(k00), // %8 "=r"(k10), // %9 "=r"(k20), // %10 "=r"(k30) // %11 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(k00), "9"(k10), "10"(k20), "11"(k30) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k00 -= 64; k10 -= 64; k20 -= 64; k30 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k00[m]; k00 += 64; output0_tm[m] += r1[m] * k00[m]; k00 += 64; output0_tm[m] += r2[m] * k00[m]; k00 += 64; output0_tm[m] += r3[m] * k00[m]; k00 += 64; k00 -= 64 * 4; output1_tm[m] += r0[m] * k10[m]; k10 += 64; output1_tm[m] += r1[m] * k10[m]; k10 += 64; output1_tm[m] += r2[m] * k10[m]; k10 += 64; 
output1_tm[m] += r3[m] * k10[m]; k10 += 64; k10 -= 64 * 4; output2_tm[m] += r0[m] * k20[m]; k20 += 64; output2_tm[m] += r1[m] * k20[m]; k20 += 64; output2_tm[m] += r2[m] * k20[m]; k20 += 64; output2_tm[m] += r3[m] * k20[m]; k20 += 64; k20 -= 64 * 4; output3_tm[m] += r0[m] * k30[m]; k30 += 64; output3_tm[m] += r1[m] * k30[m]; k30 += 64; output3_tm[m] += r2[m] * k30[m]; k30 += 64; output3_tm[m] += r3[m] * k30[m]; k30 += 64; k30 -= 64 * 4; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output1_tm[m] += r0[m] * k1[m]; output2_tm[m] += r0[m] * k2[m]; output3_tm[m] += r0[m] * k3[m]; } r0 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; } } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); const float* k2 = kernel0_tm.row(q+2); const float* k3 = kernel0_tm.row(q+3); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); float32x4_t _k3 = vld1q_f32(k3); _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k2); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k3); vst1q_f32(output0_tm, _output0_tm); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k0n = vld1q_f32(k0); float32x4_t _k1n = vld1q_f32(k1); float32x4_t _k2n = vld1q_f32(k2); float32x4_t _k3n = vld1q_f32(k3); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n); vst1q_f32(output0_tm, _output0_tmn); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; } #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "mov r4, %0 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q12, q0, q2 \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "vmla.f32 q15, q9, q11 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(k0), // %5 "=r"(k1), // %6 "=r"(k2), // %7 "=r"(k3) // %8 : "0"(output0_tm), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(k0), "6"(k1), "7"(k2), "8"(k3) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k0 -= 64; k1 -= 64; k2 -= 64; k3 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; output0_tm[m] += r2[m] * k2[m]; output0_tm[m] += r3[m] * k3[m]; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 64; output0_tm += 64; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm = out0_tm.row(i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; // TODO neon optimize for (int m=0; m<8; m++) { float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm += 8; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 
= bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm01 = img0_tm.row(i * w_tm/8 + j); float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j); float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j); float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m/2] + (m%2) * 8; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<4; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k0n = vld1q_f32(k0+4); float32x4_t _k0nn = vld1q_f32(k0+8); float32x4_t _k0nnn = vld1q_f32(k0+12); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k1n = vld1q_f32(k1+4); float32x4_t _k1nn = vld1q_f32(k1+8); float32x4_t _k1nnn = vld1q_f32(k1+12); #else float32x4_t _k0; float32x4_t _k0n; float32x4_t _k0nn; float32x4_t _k0nnn; float32x4_t _k1; float32x4_t _k1n; float32x4_t _k1nn; float32x4_t _k1nnn; asm volatile( "pld [%0, #512] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #512] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" "vld1.f32 {%e6-%f6}, [%0 :128]! \n" "vld1.f32 {%e8-%f8}, [%1 :128]! \n" "vld1.f32 {%e7-%f7}, [%0 :128]! \n" "vld1.f32 {%e9-%f9}, [%1 :128]! 
\n" : "=r"(k0), // %0 "=r"(k1), // %1 "=w"(_k0), // %2 "=w"(_k0n), // %3 "=w"(_k1), // %4 "=w"(_k1n), // %5 "=w"(_k0nn), // %6 "=w"(_k0nnn), // %7 "=w"(_k1nn), // %8 "=w"(_k1nnn) // %9 : "0"(k0), "1"(k1) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = 
vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "mov r4, %1 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "0: \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! 
\n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "subs %0, #1 \n" "vst1.f32 {d20-d23}, [r4 :128]! \n" "bne 0b \n" "sub %1, #32 \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k0), // %8 "w"(_k0n), // %9 "w"(_k1), // %10 "w"(_k1n), // %11 "w"(_k0nn), // %12 "w"(_k0nnn), // %13 "w"(_k1nn), // %14 "w"(_k1nnn) // %15 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "mov r4, %0 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q9, q13, %q7 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "vmla.f32 q8, q14, %q8 \n" "pld [%0, #256] \n" "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm "vmla.f32 q9, q15, %q9 \n" "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "vst1.f32 {d16-d19}, [r4 :128] \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q10, q14, %q12 \n" "vmla.f32 q11, q15, %q13 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k0), // %6 "w"(_k0n), // %7 "w"(_k1), // %8 "w"(_k1n), // %9 "w"(_k0nn), // %10 "w"(_k0nnn), // %11 "w"(_k1nn), // %12 "w"(_k1nnn) // %13 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<16; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; } r0 += 16; r1 += 16; output0_tm += 16; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k0 += 16; k1 += 16; #endif // __aarch64__ #else k0 += 16; k1 += 16; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<4; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k0n = vld1q_f32(k0+4); float32x4_t _k0nn = vld1q_f32(k0+8); float32x4_t _k0nnn = vld1q_f32(k0+12); #else float32x4_t _k0; float32x4_t _k0n; float32x4_t _k0nn; float32x4_t _k0nnn; asm volatile( "pld [%0, #512] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e4-%f4}, [%0 :128]! \n" : "=r"(k0), // %0 "=w"(_k0), // %1 "=w"(_k0n), // %2 "=w"(_k0nn), // %3 "=w"(_k0nnn) // %4 : "0"(k0) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "mov r4, %0 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q6 \n" "vst1.f32 {d16-d19}, [r4 :128] \n" "vmla.f32 q11, q13, %q7 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k0), // %4 "w"(_k0n), // %5 "w"(_k0nn), // %6 "w"(_k0nnn) // %7 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<16; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 16; output0_tm += 16; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k0 += 16; #endif // __aarch64__ #else k0 += 16; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j); const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j); const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m/2] + (m%2) * 8; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static 
void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3); float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4); float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5); float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6); float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; 
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); out0_tm.fill(0.f); out1_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); float32x4_t _k11 = vld1q_f32(k11); float32x4_t _k11n = vld1q_f32(k11+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; float32x4_t _k10; float32x4_t _k10n; float32x4_t _k11; float32x4_t _k11n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e4-%f4}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e6-%f6}, [%1 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {%e8-%f8}, [%2 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {%e10-%f10}, [%3 :128]! \n" "vld1.f32 {%e5-%f5}, [%0 :128]! \n" "vld1.f32 {%e7-%f7}, [%1 :128]! \n" "vld1.f32 {%e9-%f9}, [%2 :128]! \n" "vld1.f32 {%e11-%f11}, [%3 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=r"(k10), // %2 "=r"(k11), // %3 "=w"(_k00), // %4 "=w"(_k00n), // %5 "=w"(_k01), // %6 "=w"(_k01n), // %7 "=w"(_k10), // %8 "=w"(_k10n), // %9 "=w"(_k11), // %10 "=w"(_k11n) // %11 : "0"(k00), "1"(k01), "2"(k10), "3"(k11) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, 
_r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(r1) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(r1), "w"(_k00), // %10 "w"(_k00n), // %11 "w"(_k01), // %12 "w"(_k01n), // %13 "w"(_k10), // %14 "w"(_k10n), // %15 "w"(_k11), // %16 "w"(_k11n) // %17 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n), // %11 "w"(_k10), // %12 "w"(_k10n), // %13 "w"(_k11), // %14 "w"(_k11n) // %15 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; output1_tm[m] += r0[m] * k10[m]; output1_tm[m] += r1[m] * k11[m]; } r0 += 8; r1 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" : "=r"(k00), // %0 "=r"(k01), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k01), // %4 "=w"(_k01n) // %5 : "0"(k00), "1"(k01) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; 
_output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q8 \n" "vmla.f32 q9, q15, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k01), // %8 "w"(_k01n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; } r0 += 8; r1 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); #else float32x4_t _k00; float32x4_t _k00n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" : "=r"(k00), // %0 "=w"(_k00), // %1 "=w"(_k00n) // %2 : "0"(k00) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00), // %4 "w"(_k00n) // %5 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; } r0 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; #endif // __aarch64__ #else k00 += 8; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3); const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4); const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5); const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6); const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m]; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p*inch*9; for (int q=0; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k3456 = vld1q_f32(k1); float32x4_t _k6789 = vld1q_f32(k2); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _r0 = vld2q_f32(r0); float32x4x2_t _r0n = vld2q_f32(r0+8); float32x4_t _r00 = _r0.val[0];// 0 2 4 6 float32x4_t _r01 = _r0.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0n.val[0], 1);// 2 4 6 8 _outp = vfmaq_laneq_f32(_outp, _r00, _k0123, 0); _outp = vfmaq_laneq_f32(_outp, _r01, _k0123, 1); _outp = vfmaq_laneq_f32(_outp, _r02, _k0123, 2); float32x4x2_t _r1 = vld2q_f32(r1); float32x4x2_t _r1n = vld2q_f32(r1+8); float32x4_t _r10 = _r1.val[0]; float32x4_t _r11 = _r1.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1n.val[0], 1); _outp = vfmaq_laneq_f32(_outp, _r10, _k3456, 0); _outp = vfmaq_laneq_f32(_outp, _r11, _k3456, 1); _outp = vfmaq_laneq_f32(_outp, _r12, _k3456, 2); float32x4x2_t _r2 = vld2q_f32(r2); float32x4x2_t _r2n = vld2q_f32(r2+8); float32x4_t _r20 = _r2.val[0]; float32x4_t _r21 = _r2.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2n.val[0], 1); _outp = vfmaq_laneq_f32(_outp, _r20, _k6789, 0); _outp = vfmaq_laneq_f32(_outp, _r21, _k6789, 1); _outp = vfmaq_laneq_f32(_outp, _r22, _k6789, 2); vst1q_f32(outptr, _outp); r0 += 8; r1 += 8; r2 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmla.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmul.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } kernel0 += 9; } } }
multiplication_matrix_vector_parallel_openMP.c
#include <omp.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/types.h>

// Problem parameters
#define row_matriz_A 800
#define col_matriz_A 800
#define upper 1000
#define lower 0
#define THREADS_NUM 4

// A[r,c] * X[c] = B[r]

// FUNCTION - GENERATE MATRICES
void gerarMatrizes(int r, int c, int mA[], int vX[]){
    // Variables
    int i; // row index for the matrix/vector
    int j; // column index for the matrix/vector
    srandom(time(0)); // Seed

    // Loop to fill matrizA[r][c]
    for(i = 0; i < r; i++){
        for(j = 0; j < c; j++){
            mA[i * c + j] = (random() % (upper - lower + 1)) + lower;
        }
    }

    // Loop to fill vectorX[c]
    for(j = 0; j < c; j++)
        vX[j] = (random() % (upper - lower + 1)) + lower;

    // Return to main
    return;
}

// FUNCTION - SEQUENTIAL MULTIPLICATION
void multSequencial(int r, int c, int mA[], int vX[], int vBS[]){
    // Variables
    int i;              // row index for the matrix/vector
    int j;              // column index for the matrix/vector
    int sum;            // accumulator for one output element
    struct timeval tv1; // holds the start time
    struct timeval tv2; // holds the end time
    double t1;          // start time of execution
    double t2;          // end time of execution

    // Record the start time
    gettimeofday(&tv1, NULL);
    t1 = (double)(tv1.tv_sec) + (double)(tv1.tv_usec)/ 1000000.00;

    // MULTIPLICATION - A[r,c] * X[c] = B[r]
    for(i = 0; i < r; i++){
        sum = 0;
        for(j = 0; j < c; j++){
            sum += mA[i * c + j] * vX[j];
        }
        vBS[i] = sum;
    }

    // Record the end time
    gettimeofday(&tv2, NULL);
    t2 = (double)(tv2.tv_sec) + (double)(tv2.tv_usec)/ 1000000.00;
    printf("%lf;", (t2 - t1));

    // Return to main
    return;
}

// FUNCTION - PARALLEL MULTIPLICATION WITH OpenMP
void matvecHost(int r, int c, int mA[], int vX[], int vBP[]){
    // Variables
    int i;              // row index for the matrix/vector
    int j;              // column index for the matrix/vector
    int sum;            // accumulator for one output element
    struct timeval tv1; // holds the start time
    struct timeval tv2; // holds the end time
    double t1;          // start time of execution
    double t2;          // end time of execution

    // Record the start time
    gettimeofday(&tv1, NULL);
    t1 = (double)(tv1.tv_sec) + (double)(tv1.tv_usec)/ 1000000.00;

    // Spawn a number of threads equal to the physically available cores
    omp_set_num_threads(THREADS_NUM);

    // Parallel block
    #pragma omp parallel private(i, j, sum) shared(mA, vBP, vX)
    {
        #pragma omp for
        for(i = 0; i < r; i++){
            sum = 0;
            for(j = 0; j < c; j++){
                sum += mA[i * c + j] * vX[j];
            }
            vBP[i] = sum;
        }
    }

    // Record the end time
    gettimeofday(&tv2, NULL);
    t2 = (double)(tv2.tv_sec) + (double)(tv2.tv_usec)/ 1000000.00;

    // Compute and print the total execution time
    printf("%lf\n", (t2 - t1));

    // Return
    return;
}

// FUNCTION - PARALLEL MULTIPLICATION WITH CUDA (stub, not yet implemented)
void matvecDevice(int r, int c, int mA[], int vX[], int vBP[])
{
    printf("matvecDevice\n");
    return;
}

// FUNCTION - MAIN
int main(int argc, char * argv[]) {
    // Value initialization
    int i;
    int b;
    int row;
    int col;
    int verifyB;
    int benchmarks = 10;
    row = row_matriz_A;
    col = col_matriz_A;

    // Dynamic allocation of the matrix and vectors
    int *matrizA  = (int *)calloc(row * col, sizeof(int));
    int *vectorX  = (int *)calloc(col * 1, sizeof(int));
    int *vectorBS = (int *)calloc(row * 1, sizeof(int));
    int *vectorBP = (int *)calloc(row * 1, sizeof(int));

    // Read the option, guarding against a missing argument
    if(argc < 2){
        fprintf(stderr, "usage: %s <1 = CPU/OpenMP | 2 = GPU/CUDA>\n", argv[0]);
        return 1;
    }
    int opcao = atoi(argv[1]);

    // Benchmarks
    printf("rows - %d | columns - %d\nbenchmark;serial;parallel\n",row,col);
    for(b = 0; b < benchmarks; b++){
        // Generate the matrices for this run
        gerarMatrizes(row, col, matrizA, vectorX);

        // Print the benchmark run number
        printf("%d;",b);

        // Sequential multiplication, no parallelism
        multSequencial(row, col, matrizA, vectorX, vectorBS);

        // CPU parallelism
        if(opcao == 1)
            matvecHost(row, col, matrizA, vectorX, vectorBP);
        // GPU parallelism
        else if(opcao == 2)
            matvecDevice(row, col, matrizA, vectorX, vectorBP);

        // Verify vector B (reset the flag each run so one failing run
        // does not mask the result of later runs)
        verifyB = 0;
        for(i=0;i<row;i++){
            if(vectorBS[i] != vectorBP[i]){
                printf("Mismatched element | i = %d | BS[i] = %d | BP[i] = %d\n",i,vectorBS[i],vectorBP[i]);
                verifyB = 1;
            }
        }
        if(verifyB == 1) printf("ERROR - B vectors differ!\n");
    }

    // Cleanup
    free(matrizA);
    free(vectorX);
    free(vectorBS);
    free(vectorBP);
    return 0;
}
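/*
 * Added sketch (not part of the original benchmark): omp_get_wtime() is a
 * simpler, portable alternative to the gettimeofday() pairs used above. The
 * helper below is hypothetical; it folds the timing and the parallel loop of
 * matvecHost() into one function and returns the elapsed seconds instead of
 * printing them.
 */
double matvecHostTimed(int r, int c, const int mA[], const int vX[], int vB[])
{
    double t0 = omp_get_wtime();

    #pragma omp parallel for num_threads(THREADS_NUM)
    for (int i = 0; i < r; i++) {
        int sum = 0;                    // private accumulator per iteration
        for (int j = 0; j < c; j++)
            sum += mA[i * c + j] * vX[j];
        vB[i] = sum;
    }

    return omp_get_wtime() - t0;        // elapsed wall-clock seconds
}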
transpose_conj_c_coo.c
#include "alphasparse/format.h" #include <stdlib.h> #include <alphasparse/opt.h> #include <alphasparse/util.h> #include <alphasparse/compute.h> #include <memory.h> static int row_first_cmp(const ALPHA_Point *a, const ALPHA_Point *b) { if (a->x != b->x) return a->x - b->x; return a->y - b->y; } alphasparse_status_t ONAME(const ALPHA_SPMAT_COO *s, ALPHA_SPMAT_COO **d) { ALPHA_INT nnz = s->nnz; ALPHA_INT num_threads = alpha_get_thread_num(); ALPHA_Point *points = alpha_memalign(nnz * sizeof(ALPHA_Point), DEFAULT_ALIGNMENT); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < nnz; ++i) { points[i].x = s->col_indx[i]; points[i].y = s->row_indx[i]; points[i].v = s->values[i]; } qsort(points, nnz, sizeof(ALPHA_Point), (__compar_fn_t)row_first_cmp); ALPHA_SPMAT_COO *mat = alpha_malloc(sizeof(ALPHA_SPMAT_COO)); *d = mat; mat->rows = s->cols; mat->cols = s->rows; mat->nnz = s->nnz; mat->row_indx = alpha_memalign(sizeof(ALPHA_INT) * nnz, DEFAULT_ALIGNMENT); mat->col_indx = alpha_memalign(sizeof(ALPHA_INT) * nnz, DEFAULT_ALIGNMENT); mat->values = alpha_memalign(sizeof(ALPHA_Number) * nnz, DEFAULT_ALIGNMENT); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < nnz; i++) { mat->row_indx[i] = points[i].x; mat->col_indx[i] = points[i].y; alpha_conj(mat->values[i], points[i].v); } alpha_free(points); return ALPHA_SPARSE_STATUS_SUCCESS; }
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>

#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"

#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operations
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
  }

  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("train_param"), &this->param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = ToJson(param_);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find the type of each feature, using the column format */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
        auto page = batch.GetView();
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = page[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information across workers */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceeds query bound";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid * 2 + 1];
    }
    void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
      std::vector<bst_feature_t> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsampled
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is a helper function for row-based data */
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    {
      // setup position
      position_.resize(gpair.size());
      std::fill(position_.begin(), position_.end(), 0);
      // mark deleted rows (negative hessian)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        CHECK_EQ(param_.sampling_method, TrainParam::kUniform)
          << "Only uniform sampling is supported, "
          << "gradient-based sampling is only supported by GPU Hist.";
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand queue
      qexpand_.reserve(256);
      qexpand_.clear();
      qexpand_.push_back(0);
      this->UpdateNode2WorkIndex(tree);
    }
    this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
  }
  /*! \brief update the expand queue, adding in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // set the encoded position value for ridx, preserving the inactive flag
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief helper function using the column-based data structure to
   *  reset the positions to the latest splits
   * \param nodes the set of nodes that contains the splits to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) {
    // set the positions of the non-default entries first, then the defaults
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // send rows sitting at non-leaf nodes to the default child;
    // for leaf nodes that are not fresh, mark them as ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief helper function using the column-based data structure to
   *  CORRECT the positions of non-default directions that WERE set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The sorted set of feature indices that contain split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    auto page = batch.GetView();
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = page[fid];
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);

      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();

          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief helper function that collects the unique split feature indices
   *  used by the given nodes
   * \param nodes the set of nodes that contains the splits to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief helper function using the column-based data structure to move all
   *  positions onto their non-default branch, if any; the default branch is
   *  handled separately by SetDefaultPostion
   * \param nodes the set of nodes that contains the splits to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      auto page = batch.GetView();
      for (auto fid : fsplits) {
        auto col = page[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    }
    // accumulate per-thread statistics by position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
#pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair[ridx]);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s = TStats();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of weights to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief rank target for the next entry pushed to the sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size maximum size of the sketch summary
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", temp.size=" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", next_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        wmin += w;
      }
    }
    /*! \brief push the final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", temp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to its working index offset in qexpand,
   *  can be -1, which means the node is not actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *   can be negative, which means this position is no longer expanding
   *   see also Decode/EncodePosition
   */
  std::vector<int> position_;

  FeatureInteractionConstraintHost interaction_constraints_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
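// --- Added illustration (not part of XGBoost; sits outside the include
// guard purely as a demo) ---
// The position_ vector above packs an "active" flag into the sign bit via
// bitwise complement: an inactive row stores ~nid (always negative), and
// DecodePosition() recovers nid either way. A minimal standalone round trip:
#include <assert.h>

static inline int decode_position_demo(int pid) { return pid < 0 ? ~pid : pid; }

static void position_encoding_demo(void) {
  int nid = 5;
  int inactive = ~nid;                       // encode: mark row as no longer expanding
  assert(inactive < 0);                      // complement of a non-negative int is negative
  assert(decode_position_demo(inactive) == 5);
  assert(decode_position_demo(nid) == 5);    // active rows decode to themselves
}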
deconvolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*16 + q*16; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 4; const float* k2 = kernel0 + 8; const float* k3 = kernel0 + 12; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); float32x4_t _k3 = vld1q_f32(k3); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i); float* outptr0 = outptr; float* outptr1 = outptr0 + outw; float* outptr2 = outptr1 + outw; float* outptr3 = outptr2 + outw; int j = 0; #if __ARM_NEON for (; j+3<w; j+=4) { float32x4_t _v = vld1q_f32(r0); // float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); vst1q_f32(outptr0 + 0, _out00); float32x4_t _out01 = vld1q_f32(outptr0 + 1); _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1); vst1q_f32(outptr0 + 1, _out01); float32x4_t _out02 = vld1q_f32(outptr0 + 2); _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0); vst1q_f32(outptr0 + 2, _out02); float32x4_t _out03 = vld1q_f32(outptr0 + 3); _out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1); vst1q_f32(outptr0 + 3, _out03); // float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); vst1q_f32(outptr1 + 0, _out10); float32x4_t _out11 = vld1q_f32(outptr1 + 1); _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1); vst1q_f32(outptr1 + 1, _out11); float32x4_t _out12 = vld1q_f32(outptr1 + 2); _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0); vst1q_f32(outptr1 + 2, _out12); float32x4_t _out13 = vld1q_f32(outptr1 + 3); _out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1); vst1q_f32(outptr1 + 3, _out13); // float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); vst1q_f32(outptr2 + 0, _out20); float32x4_t _out21 = vld1q_f32(outptr2 + 1); _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1); vst1q_f32(outptr2 + 1, _out21); float32x4_t _out22 = vld1q_f32(outptr2 + 2); _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0); vst1q_f32(outptr2 + 2, _out22); float32x4_t _out23 = vld1q_f32(outptr2 + 3); _out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1); vst1q_f32(outptr2 + 3, _out23); // 
float32x4_t _out30 = vld1q_f32(outptr3 + 0); _out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0); vst1q_f32(outptr3 + 0, _out30); float32x4_t _out31 = vld1q_f32(outptr3 + 1); _out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1); vst1q_f32(outptr3 + 1, _out31); float32x4_t _out32 = vld1q_f32(outptr3 + 2); _out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0); vst1q_f32(outptr3 + 2, _out32); float32x4_t _out33 = vld1q_f32(outptr3 + 3); _out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1); vst1q_f32(outptr3 + 3, _out33); r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr0[3] += val * k0[3]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr1[3] += val * k1[3]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; outptr2[3] += val * k2[3]; outptr3[0] += val * k3[0]; outptr3[1] += val * k3[1]; outptr3[2] += val * k3[2]; outptr3[3] += val * k3[3]; r0++; outptr0++; outptr1++; outptr2++; outptr3++; } } } } } static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*16 + q*16; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 4; const float* k2 = kernel0 + 8; const float* k3 = kernel0 + 12; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); float32x4_t _k3 = vld1q_f32(k3); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i*2); float* outptr0 = outptr; float* outptr1 = outptr0 + outw; float* outptr2 = outptr1 + outw; float* outptr3 = outptr2 + outw; int j = 0; #if __ARM_NEON for (; j+3<w; j+=4) { float32x4_t _v = vld1q_f32(r0); // row 0 float32x4x2_t _out0 = vld2q_f32(outptr0); // 0,2,4,6 _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0); // 1,3,5,7 _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1); vst2q_f32(outptr0, _out0); _out0 = vld2q_f32(outptr0 + 2); // 2,4,6,8 _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0); // 3,5,7,9 _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1); vst2q_f32(outptr0 + 2, _out0); // row 1 float32x4x2_t _out1 = vld2q_f32(outptr1); // 0,2,4,6 _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0); // 1,3,5,7 _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1); vst2q_f32(outptr1, _out1); _out1 = vld2q_f32(outptr1 + 2); // 2,4,6,8 _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0); // 3,5,7,9 _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1); vst2q_f32(outptr1 + 2, _out1); // row 2 float32x4x2_t _out2 = vld2q_f32(outptr2); _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0); _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1); vst2q_f32(outptr2, _out2); _out2 = vld2q_f32(outptr2 + 2); _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, 
vget_high_f32(_k2), 0); _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1); vst2q_f32(outptr2 + 2, _out2); // row 3 float32x4x2_t _out3 = vld2q_f32(outptr3); _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0); _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1); vst2q_f32(outptr3, _out3); _out3 = vld2q_f32(outptr3 + 2); _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0); _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1); vst2q_f32(outptr3 + 2, _out3); r0 += 4; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr0[3] += val * k0[3]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr1[3] += val * k1[3]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; outptr2[3] += val * k2[3]; outptr3[0] += val * k3[0]; outptr3[1] += val * k3[1]; outptr3[2] += val * k3[2]; outptr3[3] += val * k3[3]; r0++; outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; } } } } }
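/*
 * Reference sketch, not part of ncnn: a plain-C scalar model of the
 * scatter-add pattern that deconv4x4s1_neon/deconv4x4s2_neon vectorize.
 * Each input pixel is multiplied by the whole 4x4 kernel and accumulated
 * into a 4x4 window of the output; the stride selects how far consecutive
 * input pixels land apart in the output (outw = (w-1)*stride + 4). The
 * array layout and names here are illustrative assumptions, not the ncnn API.
 */
#include <stdio.h>

static void deconv4x4_ref(const float* in, int w, int h,
                          float* out, int outw,
                          const float* k, /* 16 kernel taps, row-major */
                          int stride)
{
    for (int i = 0; i < h; i++)
        for (int j = 0; j < w; j++) {
            float v = in[i * w + j];
            /* scatter v into the 4x4 output window anchored at (i*stride, j*stride) */
            for (int ki = 0; ki < 4; ki++)
                for (int kj = 0; kj < 4; kj++)
                    out[(i * stride + ki) * outw + (j * stride + kj)] += v * k[ki * 4 + kj];
        }
}

int main(void)
{
    /* 2x2 input, stride 2 -> 6x6 output, all-ones kernel */
    float in[4] = {1, 2, 3, 4};
    float k[16]; for (int i = 0; i < 16; i++) k[i] = 1.f;
    float out[36] = {0};
    deconv4x4_ref(in, 2, 2, out, 6, k, 2);
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) printf("%4.0f", out[i * 6 + j]);
        printf("\n");
    }
    return 0;
}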
ext_problem.h
#pragma once

#pragma omp declare target

// This file contains a list of the global problem variables
// such as grid size, angles, energy groups, etc.
int nx;
int ny;
int nz;
int ng;
int nang;
int noct;
int cmom;
int nmom;
int nmat;
int ichunk;
int timesteps;

double dt;
double dx;
double dy;
double dz;

int outers;
int inners;

double epsi;
double tolr;

// Data
double* source;
double* flux_in;
double* flux_out;
double* flux_i;
double* flux_j;
double* flux_k;
double* denom;
double dd_i;
double* dd_j;
double* dd_k;
double* mu;
double* eta;
double* xi;
double* scat_coeff;
double* time_delta;
double* total_cross_section;
double* weights;
double* velocity;
double* scalar_flux;
double* xs;
int* mat;
double* fixed_source;
double* gg_cs;
int* lma;
double* g2g_source;
double* scalar_mom;
double* scat_cs;
unsigned int* groups_todo;
double* old_outer_scalar;
double* old_inner_scalar;
double* new_scalar;

// Global variable for the timestep
unsigned int global_timestep;

#pragma omp end declare target
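/*
 * Usage sketch, not part of the header above: globals inside a
 * `declare target` region get both a host and a device copy, but the device
 * copies start out stale. A common pattern (assuming an OpenMP 4.5+ compiler
 * built with offload support; exact pointer-attachment behavior can vary by
 * toolchain) is to set the host values, mirror scalars with `target update`,
 * and attach heap arrays with `target enter data`. Sizes here are illustrative.
 */
#include <stdlib.h>

#pragma omp declare target
int demo_nx;
double* demo_source;
#pragma omp end declare target

int main(void)
{
    demo_nx = 16;
    demo_source = (double*)calloc(demo_nx, sizeof(double));

    /* copy the scalar, then map the array and attach it to the device pointer */
    #pragma omp target update to(demo_nx)
    #pragma omp target enter data map(to: demo_source[0:demo_nx])

    #pragma omp target teams distribute parallel for
    for (int i = 0; i < demo_nx; i++)
        demo_source[i] = 1.0;

    #pragma omp target exit data map(from: demo_source[0:demo_nx])
    free(demo_source);
    return 0;
}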
convolution_sgemm_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_packnto1_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); // Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; Mat tmp; if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else tmp.create(maxk, inch, size, 2u * packn, packn, opt.workspace_allocator); { int remain_size_start = 0; int nn_size = size >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr[2] = img0[l + packn * 2]; tmpptr[3] = img0[l + packn * 3]; tmpptr[4] = img0[l + packn * 4]; tmpptr[5] = img0[l + packn * 5]; tmpptr[6] = img0[l + packn * 6]; tmpptr[7] = img0[l + packn * 7]; tmpptr += 8; } img0 += size * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(img0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(img0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(img0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(img0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); img0 += size * packn; tmpptr += packn * 8; #endif } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr[2] = img0[l + packn * 2]; tmpptr[3] = img0[l + packn * 3]; tmpptr += 4; } img0 += size * packn; #else 
vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); img0 += size * packn; tmpptr += packn * 4; #endif } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr += 2; } img0 += size * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); img0 += size * packn; tmpptr += packn * 2; #endif } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { vfloat16m1_t _val = vle16_v_f16m1(img0, vl); vse16_v_f16m1(tmpptr, _val, vl); img0 += size * packn; tmpptr += packn; } } } } int nn_outch = outch / packn; int remain_outch_start = nn_outch * packn; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * packn; __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[packn] = {0.f}; const __fp16* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr0 = kernel.channel(p / packn); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum1 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum2 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum3 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum4 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum5 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum6 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum7 = vle16_v_f16m1(biasptr, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; __fp16 val2 = *tmpptr++; __fp16 val3 = *tmpptr++; __fp16 val4 = *tmpptr++; __fp16 val5 = *tmpptr++; __fp16 val6 = *tmpptr++; __fp16 val7 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); kptr0 += packn; } #if C906 vsse16_v_f16m1(outptr0, top_blob.cstep * sizeof(__fp16), _sum0, vl); vsse16_v_f16m1(outptr0 + 1, top_blob.cstep * sizeof(__fp16), _sum1, vl); vsse16_v_f16m1(outptr0 + 2, top_blob.cstep * sizeof(__fp16), _sum2, vl); vsse16_v_f16m1(outptr0 + 3, top_blob.cstep * sizeof(__fp16), _sum3, vl); vsse16_v_f16m1(outptr0 + 4, top_blob.cstep * sizeof(__fp16), _sum4, vl); vsse16_v_f16m1(outptr0 + 5, top_blob.cstep * sizeof(__fp16), _sum5, vl); vsse16_v_f16m1(outptr0 + 6, top_blob.cstep * sizeof(__fp16), _sum6, vl); vsse16_v_f16m1(outptr0 + 7, top_blob.cstep * sizeof(__fp16), _sum7, vl); #else vssseg8e16_v_f16m1x8(outptr0, top_blob.cstep * sizeof(__fp16), vcreate_f16m1x8(_sum0, _sum1, _sum2, _sum3, _sum4, _sum5, _sum6, _sum7), vl); #endif outptr0 += 8; } for (; i + 3 < size; i += 4) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr0 = kernel.channel(p / packn); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum1 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum2 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum3 = vle16_v_f16m1(biasptr, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; __fp16 val2 = *tmpptr++; __fp16 val3 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); kptr0 += packn; } #if C906 vsse16_v_f16m1(outptr0, top_blob.cstep * sizeof(__fp16), _sum0, vl); vsse16_v_f16m1(outptr0 + 1, top_blob.cstep * sizeof(__fp16), _sum1, vl); vsse16_v_f16m1(outptr0 + 2, top_blob.cstep * sizeof(__fp16), _sum2, vl); vsse16_v_f16m1(outptr0 + 3, top_blob.cstep * sizeof(__fp16), _sum3, vl); #else vssseg4e16_v_f16m1x4(outptr0, top_blob.cstep * sizeof(__fp16), vcreate_f16m1x4(_sum0, _sum1, _sum2, _sum3), vl); #endif outptr0 += 4; } for (; i + 1 < size; i += 2) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* kptr0 = kernel.channel(p / packn); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vle16_v_f16m1(biasptr, vl); vfloat16m1_t _sum1 = vle16_v_f16m1(biasptr, vl); for (int j = 0; j < nn; j++) { 
__fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); kptr0 += packn; } #if C906 vsse16_v_f16m1(outptr0, top_blob.cstep * sizeof(__fp16), _sum0, vl); vsse16_v_f16m1(outptr0 + 1, top_blob.cstep * sizeof(__fp16), _sum1, vl); #else vssseg2e16_v_f16m1x2(outptr0, top_blob.cstep * sizeof(__fp16), vcreate_f16m1x2(_sum0, _sum1), vl); #endif outptr0 += 2; } for (; i < size; i++) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* kptr0 = kernel.channel(p / packn); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum = vle16_v_f16m1(biasptr, vl); for (int j = 0; j < nn; j++) { __fp16 val = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); kptr0 += packn; } vsse16_v_f16m1(outptr0, top_blob.cstep * sizeof(__fp16), _sum, vl); outptr0 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 bias0 = bias ? bias[p] : 0.f; int i = 0; for (; i + 7 < size; i += 8) { const __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr0 = kernel.channel(p / packn + p % packn); int nn = inch * maxk; // inch always > 0 __fp16 sum0 = bias0; __fp16 sum1 = bias0; __fp16 sum2 = bias0; __fp16 sum3 = bias0; __fp16 sum4 = bias0; __fp16 sum5 = bias0; __fp16 sum6 = bias0; __fp16 sum7 = bias0; vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { vfloat16m1x8_t _val01 = vlseg8e16_v_f16m1x8(tmpptr, vl); vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vv_f16m1(_sum0, vget_f16m1x8_f16m1(_val01, 0), _w0, vl); _sum1 = vfmacc_vv_f16m1(_sum1, vget_f16m1x8_f16m1(_val01, 1), _w0, vl); _sum2 = vfmacc_vv_f16m1(_sum2, vget_f16m1x8_f16m1(_val01, 2), _w0, vl); _sum3 = vfmacc_vv_f16m1(_sum3, vget_f16m1x8_f16m1(_val01, 3), _w0, vl); _sum4 = vfmacc_vv_f16m1(_sum4, vget_f16m1x8_f16m1(_val01, 4), _w0, vl); _sum5 = vfmacc_vv_f16m1(_sum5, vget_f16m1x8_f16m1(_val01, 5), _w0, vl); _sum6 = vfmacc_vv_f16m1(_sum6, vget_f16m1x8_f16m1(_val01, 6), _w0, vl); _sum7 = vfmacc_vv_f16m1(_sum7, vget_f16m1x8_f16m1(_val01, 7), _w0, vl); tmpptr += packn * 8; kptr0 += packn; } #if C906 // TODO std::vector<__fp16> ss0(packn); std::vector<__fp16> ss1(packn); std::vector<__fp16> ss2(packn); std::vector<__fp16> ss3(packn); std::vector<__fp16> ss4(packn); std::vector<__fp16> ss5(packn); std::vector<__fp16> ss6(packn); std::vector<__fp16> ss7(packn); vse16_v_f16m1((__fp16*)ss0.data(), _sum0, vl); vse16_v_f16m1((__fp16*)ss1.data(), _sum1, vl); vse16_v_f16m1((__fp16*)ss2.data(), _sum2, vl); vse16_v_f16m1((__fp16*)ss3.data(), _sum3, vl); vse16_v_f16m1((__fp16*)ss4.data(), _sum4, vl); vse16_v_f16m1((__fp16*)ss5.data(), _sum5, vl); vse16_v_f16m1((__fp16*)ss6.data(), _sum6, vl); vse16_v_f16m1((__fp16*)ss7.data(), _sum7, vl); for (int i = 0; i < packn; i++) { sum0 += ss0[i]; sum1 += ss1[i]; sum2 += ss2[i]; sum3 += ss3[i]; sum4 += ss4[i]; sum5 += ss5[i]; sum6 += ss6[i]; sum7 += ss7[i]; } #else sum0 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum0, 
vfmv_s_f_f16m1(vfloat16m1_t(), sum0, vl), vl)); sum1 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum1, vfmv_s_f_f16m1(vfloat16m1_t(), sum1, vl), vl)); sum2 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum2, vfmv_s_f_f16m1(vfloat16m1_t(), sum2, vl), vl)); sum3 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum3, vfmv_s_f_f16m1(vfloat16m1_t(), sum3, vl), vl)); sum4 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum4, vfmv_s_f_f16m1(vfloat16m1_t(), sum4, vl), vl)); sum5 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum5, vfmv_s_f_f16m1(vfloat16m1_t(), sum5, vl), vl)); sum6 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum6, vfmv_s_f_f16m1(vfloat16m1_t(), sum6, vl), vl)); sum7 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum7, vfmv_s_f_f16m1(vfloat16m1_t(), sum7, vl), vl)); #endif outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; } for (; i + 3 < size; i += 4) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr0 = kernel.channel(p / packn + p % packn); int nn = inch * maxk; // inch always > 0 __fp16 sum0 = bias0; __fp16 sum1 = bias0; __fp16 sum2 = bias0; __fp16 sum3 = bias0; vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { vfloat16m1x4_t _val01 = vlseg4e16_v_f16m1x4(tmpptr, vl); vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vv_f16m1(_sum0, vget_f16m1x4_f16m1(_val01, 0), _w0, vl); _sum1 = vfmacc_vv_f16m1(_sum1, vget_f16m1x4_f16m1(_val01, 1), _w0, vl); _sum2 = vfmacc_vv_f16m1(_sum2, vget_f16m1x4_f16m1(_val01, 2), _w0, vl); _sum3 = vfmacc_vv_f16m1(_sum3, vget_f16m1x4_f16m1(_val01, 3), _w0, vl); tmpptr += packn * 4; kptr0 += packn; } #if C906 // TODO std::vector<__fp16> ss0(packn); std::vector<__fp16> ss1(packn); std::vector<__fp16> ss2(packn); std::vector<__fp16> ss3(packn); vse16_v_f16m1((__fp16*)ss0.data(), _sum0, vl); vse16_v_f16m1((__fp16*)ss1.data(), _sum1, vl); vse16_v_f16m1((__fp16*)ss2.data(), _sum2, vl); vse16_v_f16m1((__fp16*)ss3.data(), _sum3, vl); for (int i = 0; i < packn; i++) { sum0 += ss0[i]; sum1 += ss1[i]; sum2 += ss2[i]; sum3 += ss3[i]; } #else sum0 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum0, vfmv_s_f_f16m1(vfloat16m1_t(), sum0, vl), vl)); sum1 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum1, vfmv_s_f_f16m1(vfloat16m1_t(), sum1, vl), vl)); sum2 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum2, vfmv_s_f_f16m1(vfloat16m1_t(), sum2, vl), vl)); sum3 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum3, vfmv_s_f_f16m1(vfloat16m1_t(), sum3, vl), vl)); #endif outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; } for (; i + 1 < size; i += 2) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* kptr0 = kernel.channel(p / packn + p % packn); int nn = inch * maxk; // inch always > 0 __fp16 sum0 = bias0; __fp16 sum1 = bias0; vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { vfloat16m1x2_t _val01 = vlseg2e16_v_f16m1x2(tmpptr, vl); vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vv_f16m1(_sum0, vget_f16m1x2_f16m1(_val01, 0), 
_w0, vl); _sum1 = vfmacc_vv_f16m1(_sum1, vget_f16m1x2_f16m1(_val01, 1), _w0, vl); tmpptr += packn * 2; kptr0 += packn; } #if C906 // TODO std::vector<__fp16> ss0(packn); std::vector<__fp16> ss1(packn); vse16_v_f16m1((__fp16*)ss0.data(), _sum0, vl); vse16_v_f16m1((__fp16*)ss1.data(), _sum1, vl); for (int i = 0; i < packn; i++) { sum0 += ss0[i]; sum1 += ss1[i]; } #else sum0 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum0, vfmv_s_f_f16m1(vfloat16m1_t(), sum0, vl), vl)); sum1 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum1, vfmv_s_f_f16m1(vfloat16m1_t(), sum1, vl), vl)); #endif outptr0[0] = sum0; outptr0[1] = sum1; outptr0 += 2; } for (; i < size; i++) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* kptr0 = kernel.channel(p / packn + p % packn); int nn = inch * maxk; // inch always > 0 __fp16 sum0 = bias0; vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { vfloat16m1_t _val0 = vle16_v_f16m1(tmpptr, vl); vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vv_f16m1(_sum0, _val0, _w0, vl); tmpptr += packn; kptr0 += packn; } #if C906 // TODO std::vector<__fp16> ss0(packn); vse16_v_f16m1((__fp16*)ss0.data(), _sum0, vl); for (int i = 0; i < packn; i++) { sum0 += ss0[i]; } #else sum0 = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum0, vfmv_s_f_f16m1(vfloat16m1_t(), sum0, vl), vl)); #endif outptr0[0] = sum0; outptr0 += 1; } } } static void convolution_im2col_sgemm_transform_kernel_packnto1_fp16sa_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int packn = csrr_vlenb() / 2; const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = pb-pa-maxk-inch/pa-outch/pb Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(packn * packn * maxk, inch / packn, outch / packn + outch % packn, (size_t)2u); int q = 0; for (; q + (packn - 1) < outch; q += packn) { __fp16* g00 = kernel_tm.channel(q / packn); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < packn; i++) { for (int j = 0; j < packn; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } for (; q < outch; q++) { const Mat k0 = kernel.channel(q); __fp16* g00 = kernel_tm.channel(q / packn + q % packn); for (int p = 0; p + (packn - 1) < inch; p += packn) { for (int k = 0; k < maxk; k++) { for (int j = 0; j < packn; j++) { const float* k00 = k0.row(p + j); g00[0] = (__fp16)k00[k]; g00++; } } } } } static void convolution_im2col_sgemm_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * packn; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); __fp16* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 
packn; for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { vfloat16m1_t _val = vle16_v_f16m1(sptr, vl); vse16_v_f16m1(ptr, _val, vl); sptr += stride_w * packn; ptr += packn; } sptr += gap; } } } } } im2col_sgemm_packnto1_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt); }
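/*
 * Side note, a standalone sketch rather than ncnn code: the expression
 * i/8 + (i%8)/4 + (i%4)/2 + i%2, which recurs in both the packing and the
 * gemm loops above, computes which channel of `tmp` holds the column tile
 * starting at column i. The greedy 8/4/2/1 tiling packs all 8-wide tiles
 * first, then at most one 4-wide, one 2-wide, and one 1-wide tile, so the
 * channel index is simply the count of complete tiles packed before column i.
 */
#include <stdio.h>

static int tile_channel(int i)
{
    return i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2;
}

int main(void)
{
    /* size = 15 -> greedy tiling: 8-tile at col 0, 4-tile at col 8,
       2-tile at col 12, 1-tile at col 14 */
    int starts[] = {0, 8, 12, 14};
    for (int t = 0; t < 4; t++)
        printf("tile starting at col %2d -> tmp channel %d\n",
               starts[t], tile_channel(starts[t]));
    /* prints channels 0, 1, 2, 3 */
    return 0;
}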
conv_dw_hcl_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: qtang@openailab.com */ #include "convolution_param.h" #include "conv_dw_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #include <string.h> static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v) { int8_t* ptr = input; int8_t* outptr = output; int y = 0; // fill top for (; y < top; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } // fill center for (; y < (top + in_h); y++) { int x = 0; for (; x < left; x++) { outptr[x] = v; } if (in_w < 12) { for (; x < (left + in_w); x++) { outptr[x] = ptr[x - left]; } } else { memcpy(outptr + left, ptr, in_w * sizeof(int8_t)); x += in_w; } for (; x < out_w; x++) { outptr[x] = v; } ptr += in_w; outptr += out_w; } // fill bottom for (; y < out_h; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } } static int convdw3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_param* param, int num_thread) { int inch = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_hw = inh * inw; int outch = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_size = output_tensor->elem_num; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t)); memset(output_int32, 0, out_size * sizeof(int32_t)); float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float)); int8_t* output_int8 = (int8_t*)output_tensor->data; int8_t* input_int8 = (int8_t*)input_tensor->data; int32_t* bias_int32 = NULL; if(bias_tensor) bias_int32 = (int32_t*)bias_tensor->data; /* get scale value of quantizaiton */ float input_scale = input_tensor->scale; float* kernel_scales = weight_tensor->scale_list; float output_scale = output_tensor->scale; const signed char* kernel = (const signed char*)weight_tensor->data; /* pading */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; int8_t* input_tmp = NULL; if (inh_tmp == inh && inw_tmp == inw) input_tmp = input_int8; else { input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t)); #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < inch; g++) { int8_t* pad_in = input_int8 + g * inh * inw; int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp; 
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0); } } #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { int32_t* out0 = output_int32 + p * out_hw; int8_t* kernel0 = (int8_t* )kernel + p * 9; int* outptr0 = out0; int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp; int8_t* r0 = img0; int8_t* r1 = img0 + inw_tmp; int8_t* r2 = img0 + inw_tmp * 2; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum0 = 0; sum0 += ( int )r0[0] * kernel0[0]; sum0 += ( int )r0[1] * kernel0[1]; sum0 += ( int )r0[2] * kernel0[2]; sum0 += ( int )r1[0] * kernel0[3]; sum0 += ( int )r1[1] * kernel0[4]; sum0 += ( int )r1[2] * kernel0[5]; sum0 += ( int )r2[0] * kernel0[6]; sum0 += ( int )r2[1] * kernel0[7]; sum0 += ( int )r2[2] * kernel0[8]; *outptr0 += sum0; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } kernel0 += 9; } /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (bias_tensor) output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_int32); sys_free(output_fp32); if (!(inh_tmp == inh && inw_tmp == inw)) sys_free(input_tmp); return 0; } static int convdw3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_param* param, int num_thread) { int inch = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_hw = inh * inw; int outch = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_size = output_tensor->elem_num; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t)); memset(output_int32, 0, out_size * sizeof(int32_t)); float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float)); int8_t* output_int8 = (int8_t*)output_tensor->data; int8_t* input_int8 = (int8_t*)input_tensor->data; int32_t* bias_int32 = NULL; if(bias_tensor) bias_int32 = (int32_t*)bias_tensor->data; /* get scale value of quantizaiton */ float input_scale = input_tensor->scale; float* kernel_scales = 
weight_tensor->scale_list; float output_scale = output_tensor->scale; const signed char* kernel = (const signed char*)weight_tensor->data; /* pading */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; int8_t* input_tmp = NULL; if (inh_tmp == inh && inw_tmp == inw) input_tmp = input_int8; else { input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t)); #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < inch; g++) { int8_t* pad_in = input_int8 + g * inh * inw; int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp; pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0); } } int tailstep = inw_tmp - 2 * outw + inw_tmp; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { int32_t* out0 = output_int32 + p * out_hw; int8_t* kernel0 = (int8_t* )kernel + p * 9; int* outptr0 = out0; int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp; int8_t* r0 = img0; int8_t* r1 = img0 + inw_tmp; int8_t* r2 = img0 + inw_tmp * 2; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum0 = 0; sum0 += ( int )r0[0] * kernel0[0]; sum0 += ( int )r0[1] * kernel0[1]; sum0 += ( int )r0[2] * kernel0[2]; sum0 += ( int )r1[0] * kernel0[3]; sum0 += ( int )r1[1] * kernel0[4]; sum0 += ( int )r1[2] * kernel0[5]; sum0 += ( int )r2[0] * kernel0[6]; sum0 += ( int )r2[1] * kernel0[7]; sum0 += ( int )r2[2] * kernel0[8]; *outptr0 += sum0; r0 += 2; r1 += 2; r2 += 2; outptr0++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } kernel0 += 9; } /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (bias_tensor) output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_int32); sys_free(output_fp32); if (!(inh_tmp == inh && inw_tmp == inw)) sys_free(input_tmp); return 0; } static int conv_dw_run_int8(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_param* param, int num_thread) { int ret = -1; switch(param->stride_h) { case 1: ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread); break; case 2: ret = 
convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread); break; default: TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h); } return ret; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); struct tensor* bias_tensor = NULL; struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int num_thread = exec_graph->num_thread; int cpu_affinity = exec_graph->cpu_affinity; /* set the input data and shape again, in case of reshape or dynamic shape */ if (ir_node->input_num > 2) bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]); struct conv_param* conv_param = ( struct conv_param* )ir_node->op.param_mem; struct conv_priv_info* conv_priv_info = ( struct conv_priv_info* )exec_node->ops_priv; int ret = -1; if (exec_graph->mode == TENGINE_MODE_FP32) ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param, num_thread, cpu_affinity); else if (exec_graph->mode == TENGINE_MODE_INT8) ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread); else { TLOG_ERR("hcl conv run failed\n"); return -1; } return ret; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem; struct node* ir_node = exec_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int group = param->group; int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int pad_h1 = param->pad_h1; int pad_w1 = param->pad_w1; int in_c = input_tensor->dims[1] / group; int out_c = output_tensor->dims[1] / group; /* todo support uint8 */ if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8)) return 0; if (kernel_h != kernel_w || input_tensor->dims[0] > 1) return 0; if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3 && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2))) return OPS_SCORE_BEST; else return 0; } static struct node_ops hcl_node_ops = {.prerun = NULL, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_conv_dw_hcl_x86_op() { return register_builtin_node_ops(OP_CONV, &hcl_node_ops); } int unregister_conv_dw_hcl_x86_op() { unregister_builtin_node_ops(OP_CONV, &hcl_node_ops); return 0; }
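/*
 * Standalone sketch of the requantization chain used in the int8 kernels
 * above (the function name and argument list are illustrative, not the
 * Tengine API): the int32 accumulator is dequantized to fp32 via the
 * per-channel scale product, optionally clamped by ReLU/ReLU6, then divided
 * by the output scale, rounded, and saturated to the symmetric range
 * [-127, 127], matching the clamping in the kernels. Compile with -lm.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int8_t requantize(int32_t acc, int32_t bias, float input_scale,
                         float kernel_scale, float output_scale, int activation)
{
    float v = (float)(acc + bias) * input_scale * kernel_scale; /* dequant */
    if (activation >= 0 && v < 0.f) v = 0.f; /* relu  (activation == 0)  */
    if (activation > 0 && v > 6.f)  v = 6.f; /* relu6 (activation > 0)   */
    int32_t q = (int32_t)round(v / output_scale);               /* requant */
    if (q > 127) q = 127;
    if (q < -127) q = -127;                  /* symmetric clamp, as above */
    return (int8_t)q;
}

int main(void)
{
    /* (50000 + 0) * 0.02 * 0.01 = 10.0, relu6 -> 6.0, 6.0 / 0.05 -> 120 */
    printf("%d\n", requantize(50000, 0, 0.02f, 0.01f, 0.05f, 1));
    return 0;
}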
openmp_tournament.c
/*
 * TOURNAMENT BARRIER: OPENMP
 * To show correct functionality of the barrier, uncomment the printf
 * inside the timing loop in main().
 * To compile: gcc -o openmp_tournament openmp_tournament.c -lm -fopenmp
 * To run: ./openmp_tournament [num_threads num_barriers]
 */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#include <stdbool.h>

#define WINNER 0
#define LOSER 1
#define BYE 2
#define CHAMPION 3
#define DROPOUT 4

typedef struct record {
    int role;
    int opponent;
    int flag;
} record_t;

record_t** players;
int P, N;

void barrier_init()
{
    int i, k;
    int rounds = (ceil(log(P) / log(2)));

    players = (record_t**)malloc(P * sizeof(record_t*));
    for (i = 0; i < P; i++) {
        players[i] = (record_t*)calloc(rounds + 1, sizeof(record_t));
    }

    for (i = 0; i < P; i++) {
        for (k = 0; k <= rounds; k++) {
            // Initializing
            players[i][k].role = -1;
            players[i][k].opponent = -1;
            players[i][k].flag = 0;

            // Initializing role and opponent
            if (k == 0) {
                players[i][k].role = DROPOUT;
            }
            else if (k > 0) {
                if (i == 0 && (1 << k) >= P) {
                    players[i][k].role = CHAMPION;
                    players[i][k].opponent = i + (1 << (k - 1));
                }
                else if (i % (1 << k) == 0) {
                    if (((i + (1 << (k - 1))) < P) && ((1 << k) < P)) {
                        players[i][k].role = WINNER;
                        players[i][k].opponent = i + (1 << (k - 1));
                    }
                    else if ((i + (1 << (k - 1))) >= P) {
                        players[i][k].role = BYE;
                    }
                }
                else if ((i % (1 << k)) == (1 << (k - 1))) {
                    players[i][k].role = LOSER;
                    players[i][k].opponent = i - (1 << (k - 1));
                }
            }
        }
    }
}

void tournament_barrier(int* pid_sense)
{
    int rounds = (ceil(log(P) / log(2)));
    int round = 1;
    int vpid = omp_get_thread_num();

    // Arrival tree
    while (1) {
        if (players[vpid][round].role == LOSER) {
            players[players[vpid][round].opponent][round].flag = *pid_sense;
            while (players[vpid][round].flag != *pid_sense)
                ; // spin until signalled by the winner
            break;
        }
        else if (players[vpid][round].role == WINNER) {
            while (players[vpid][round].flag != *pid_sense)
                ; // spin until the loser of this match arrives
        }
        else if (players[vpid][round].role == CHAMPION) {
            while (players[vpid][round].flag != *pid_sense)
                ;
            players[players[vpid][round].opponent][round].flag = *pid_sense;
            break;
        }
        round += 1;
        if (round > rounds)
            break;
    }

    // Wakeup tree
    while (1) {
        if (round == -1)
            break;
        round -= 1;
        if (players[vpid][round].role == WINNER) {
            players[players[vpid][round].opponent][round].flag = *pid_sense;
        }
        else if (players[vpid][round].role == DROPOUT)
            break;
    }

    *pid_sense = !*pid_sense;
}

int main(int argc, char* argv[])
{
    if (argc == 3) {
        if (sscanf(argv[1], "%d", &P) != 1)
            printf("P - not an integer\n");
        if (sscanf(argv[2], "%d", &N) != 1)
            printf("N - not an integer\n");
    }
    else {
        // Number of processors
        P = 4;
        // Number of loops
        N = 5;
    }

    struct timeval tv1, tv2;
    double total_time;
    int pid_sense = 1;

    barrier_init();
    omp_set_num_threads(P);

    gettimeofday(&tv1, NULL);
    #pragma omp parallel shared(players, N) firstprivate(pid_sense)
    {
        int i;
        for (i = 0; i < N; i++) {
            // printf("==============BARRIER %d=================\n", i);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
        }
    }
    gettimeofday(&tv2, NULL);

    total_time = (double)(tv2.tv_usec - tv1.tv_usec)
               + (double)(tv2.tv_sec - tv1.tv_sec) * 1000000;

    printf("\nSUMMARY:\nTotal run-time for %d "
           "loops with 5 barriers per loop: %fs\n"
           "The average time per barrier: %fus\n",
           N, total_time / 1000000, (double)(total_time / (N * 5)));

    // Free the per-player round records as well as the outer array
    // (freeing only the outer array would leak the rows).
    for (int i = 0; i < P; i++)
        free(players[i]);
    free(players);
    return 0;
}
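/*
 * Companion sketch, not part of the file above: the same sense-reversal idea
 * in its simplest form, a centralized barrier. Each thread flips a private
 * `sense` once per barrier episode and the last arriver releases the rest,
 * which is the role the CHAMPION plays in the tournament version.
 * Illustrative code assuming C11 atomics; names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <omp.h>

static atomic_int count;
static atomic_int global_sense;
static int nthreads;

void central_barrier(int* sense)
{
    *sense = !*sense; /* the sense value this episode completes with */
    if (atomic_fetch_sub(&count, 1) == 1) {
        atomic_store(&count, nthreads);      /* last arriver resets the count */
        atomic_store(&global_sense, *sense); /* ...and releases everyone */
    }
    else {
        while (atomic_load(&global_sense) != *sense)
            ; /* spin until released */
    }
}

int main(void)
{
    nthreads = 4;
    atomic_store(&count, nthreads);
    atomic_store(&global_sense, 0);
    omp_set_num_threads(nthreads);
    #pragma omp parallel
    {
        int sense = 0;
        for (int i = 0; i < 3; i++)
            central_barrier(&sense);
        printf("thread %d passed 3 barriers\n", omp_get_thread_num());
    }
    return 0;
}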
batchnorm_tpp.h
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Kirill Voronin (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <libxsmm_sync.h> #include <libxsmm_intrinsics_x86.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #define BITS_PER_CHAR (8) typedef enum my_bn_fuse { MY_BN_FUSE_NONE = 0, MY_BN_FUSE_RELU = 1, MY_BN_FUSE_ELTWISE = 2, MY_BN_FUSE_ELTWISE_RELU = 3, MY_BN_FUSE_RELU_WITH_MASK = 4, MY_BN_FUSE_ELTWISE_RELU_WITH_MASK = 5 } my_bn_fuse; typedef enum my_bn_norm_type { MY_BN_FULL_NORM = 0, /* stats + normalize for fwd, all grads for bwd */ MY_BN_SCALE_ONLY = 1 /* normalize only for fwd, only input grad for bwd */ } my_bn_norm_type; typedef struct my_bn_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint H; libxsmm_blasint W; libxsmm_blasint bc; libxsmm_blasint CP; libxsmm_blasint num_HW_blocks; libxsmm_blasint threads; size_t scratch_size; libxsmm_datatype datatype_in; libxsmm_datatype datatype_out; libxsmm_datatype datatype_comp; libxsmm_barrier* barrier; libxsmm_matrix_eqn_function func10; libxsmm_meltwfunction_unary reduce_HW_kernel; libxsmm_meltwfunction_unary all_zero_kernel; libxsmm_meltwfunction_binary helper_add_kernel; my_bn_fuse fuse_type; } my_bn_fwd_config; typedef struct my_bn_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint H; libxsmm_blasint W; libxsmm_blasint bc; libxsmm_blasint CP; libxsmm_blasint num_HW_blocks; libxsmm_blasint threads; size_t scratch_size; libxsmm_datatype datatype_in; libxsmm_datatype datatype_out; libxsmm_datatype datatype_comp; libxsmm_barrier* barrier; libxsmm_matrix_eqn_function dgamma_func; libxsmm_matrix_eqn_function dbeta_func; libxsmm_matrix_eqn_function din_func; libxsmm_meltwfunction_unary all_zero_kernel; libxsmm_meltwfunction_binary helper_add_kernel; libxsmm_meltwfunction_unary helper_copy_kernel; libxsmm_meltwfunction_unary inv_relu_kernel; libxsmm_meltwfunction_unary ewise_copy_kernel; my_bn_fuse fuse_type; } my_bn_bwd_config; my_bn_fwd_config setup_my_bn_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint bc, libxsmm_blasint threads, my_bn_fuse fuse_type, libxsmm_datatype datatype_in, libxsmm_datatype datatype_out, libxsmm_datatype datatype_comp ) { my_bn_fwd_config res; size_t sum_N_offset, sumsq_N_offset; libxsmm_meltw_unary_shape unary_shape; libxsmm_meltw_binary_shape binary_shape; libxsmm_bitfield unary_flags; libxsmm_bitfield binary_flags; libxsmm_bitfield ternary_flags; libxsmm_blasint ldo = bc; libxsmm_blasint ld = bc; libxsmm_blasint tmp_ld, tmp_ld2; libxsmm_blasint my_eqn10; libxsmm_meqn_arg_shape eqn_out_arg_shape; libxsmm_meqn_arg_shape arg_shape[128]; libxsmm_matrix_arg_attributes arg_singular_attr; libxsmm_matrix_eqn_arg_metadata arg_metadata[128]; libxsmm_matrix_eqn_op_metadata op_metadata[128]; arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR; memset( &res, 0, sizeof(res)); /* setting up some handle values */ res.N = N; res.C = C; res.H = H; res.W = W; res.bc = bc; res.CP = res.C / res.bc; res.num_HW_blocks = (res.H > 
res.W ? res.H : res.W ); res.threads = threads; res.fuse_type = fuse_type; /* when masking is on, bc must be divisible by 8 for compressing mask into char array (otherwise strides are wrong for relumask */ if ( (res.fuse_type == 4 || res.fuse_type == 5) && (res.bc % BITS_PER_CHAR != 0)) { fprintf( stderr, "bc = %d is not divisible by BITS_PER_CHAR = %d. Bailing...!\n", res.bc, BITS_PER_CHAR); exit(-1); } res.datatype_in = datatype_in; res.datatype_out = datatype_out; res.datatype_comp = datatype_comp; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ memset( &unary_shape, 0, sizeof(libxsmm_meltw_unary_shape)); memset( &binary_shape, 0, sizeof(libxsmm_meltw_binary_shape)); /* Eltwise TPPs */ unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp); res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags); if ( res.all_zero_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd all_zero_kernel failed. Bailing...!\n"); exit(-1); } binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ldo, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; res.helper_add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags); if ( res.helper_add_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd helper_add_kernel failed. Bailing...!\n"); exit(-1); } /* TPPs for reducing X and X2 in HW*/ tmp_ld = bc; unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ld, tmp_ld, res.datatype_in, res.datatype_comp, res.datatype_comp); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; res.reduce_HW_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD, unary_shape, unary_flags); if ( res.reduce_HW_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd reduce_HW_kernel failed. Bailing...!\n"); exit(-1); } /* TPP for scaling */ ld = bc; tmp_ld = 1; tmp_ld2 = 1; my_eqn10 = libxsmm_matrix_eqn_create(); /* y = relu ( ( (s*x + b)*gamma + beta ) + inp_add) */ if (res.fuse_type == 1 || res.fuse_type == 3 || res.fuse_type == 4 || res.fuse_type == 5) { unary_flags = ( (res.fuse_type == 4 || res.fuse_type == 5) ? 
LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE); op_metadata[3].eqn_idx = my_eqn10; op_metadata[3].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_UNARY_RELU, res.datatype_out, unary_flags); if (res.datatype_out == LIBXSMM_DATATYPE_BF16) libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, res.datatype_out, LIBXSMM_MELTW_FLAG_UNARY_NONE); } if (res.fuse_type == 2 || res.fuse_type == 3 || res.fuse_type == 5) { binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[2].eqn_idx = my_eqn10; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags); } ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[0].eqn_idx = my_eqn10; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags); arg_metadata[3].eqn_idx = my_eqn10; arg_metadata[3].in_arg_pos = 3; arg_shape[3].m = res.bc; /* gamma = [bc] */ arg_shape[3].n = 1; arg_shape[3].ld = tmp_ld2; arg_shape[3].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[1].eqn_idx = my_eqn10; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags); arg_metadata[1].eqn_idx = my_eqn10; arg_metadata[1].in_arg_pos = 1; arg_shape[1].m = res.bc; /* s = [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld; arg_shape[1].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[0].eqn_idx = my_eqn10; arg_metadata[0].in_arg_pos = 0; arg_shape[0].m = res.bc; /* x = [HW, bc] */ arg_shape[0].n = res.H*res.W /res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = res.datatype_in; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn10; arg_metadata[2].in_arg_pos = 2; arg_shape[2].m = res.bc; /* b = [bc] */ arg_shape[2].n = 1; arg_shape[2].ld = tmp_ld; arg_shape[2].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); arg_metadata[4].eqn_idx = my_eqn10; arg_metadata[4].in_arg_pos = 4; arg_shape[4].m = res.bc; /* beta = [bc] */ arg_shape[4].n = 1; arg_shape[4].ld = tmp_ld2; arg_shape[4].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr); if (res.fuse_type == 2 || res.fuse_type == 3 || res.fuse_type == 5) { arg_metadata[5].eqn_idx = my_eqn10; arg_metadata[5].in_arg_pos = 5; arg_shape[5].m = res.bc; /* inp_add = [HW, bc] */ arg_shape[5].n = res.H*res.W / res.num_HW_blocks; arg_shape[5].ld = ld; arg_shape[5].type = res.datatype_in; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[5], arg_shape[5], arg_singular_attr); } eqn_out_arg_shape.m = res.bc; /* y = [HW, bc] */ eqn_out_arg_shape.n = res.H*res.W / res.num_HW_blocks; eqn_out_arg_shape.ld = ld; eqn_out_arg_shape.type = res.datatype_out; /* libxsmm_matrix_eqn_tree_print( my_eqn10 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn10 ); */ 
res.func10 = libxsmm_dispatch_matrix_eqn_v2( my_eqn10, eqn_out_arg_shape ); if ( res.func10 == NULL) { fprintf( stderr, "JIT for TPP fwd func10 (eqn10) failed. Bailing...!\n"); exit(-1); } /* init scratch */ sum_N_offset = LIBXSMM_UP2(res.CP * 2 * res.bc, 64); sumsq_N_offset = LIBXSMM_UP2(sum_N_offset + res.CP * res.N * res.bc, 64); res.scratch_size = sizeof(float) * ( sumsq_N_offset /*sum_X_X2 + sumsq_N */ + LIBXSMM_UP2((size_t)res.CP * (size_t)res.N * (size_t)res.bc, 64) /* sumsq_N */ ); return res; } my_bn_bwd_config setup_my_bn_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint bc, libxsmm_blasint threads, my_bn_fuse fuse_type, libxsmm_datatype datatype_in, libxsmm_datatype datatype_out, libxsmm_datatype datatype_comp ) { my_bn_bwd_config res; libxsmm_meltw_unary_shape unary_shape; libxsmm_meltw_binary_shape binary_shape; libxsmm_bitfield unary_flags; libxsmm_bitfield binary_flags; libxsmm_bitfield ternary_flags; libxsmm_meqn_arg_shape eqn_out_arg_shape; libxsmm_meqn_arg_shape arg_shape[128]; libxsmm_matrix_arg_attributes arg_singular_attr; libxsmm_matrix_eqn_arg_metadata arg_metadata[128]; libxsmm_matrix_eqn_op_metadata op_metadata[128]; arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR; size_t dbeta_N_offset; libxsmm_blasint ldo = bc; libxsmm_blasint ld = bc; libxsmm_blasint tmp_ld2; libxsmm_blasint my_eqn11, my_eqn12, my_eqn16; memset( &res, 0, sizeof(res)); /* setting up some handle values */ res.N = N; res.C = C; res.H = H; res.W = W; res.bc = bc; res.CP = res.C / res.bc; res.num_HW_blocks = (res.H > res.W ? res.H : res.W ); res.threads = threads; res.fuse_type = fuse_type; /* when masking is on, bc must be divisible by 8 for compressing mask into char array (otherwise strides are wrong for relumask */ if ( (res.fuse_type == 4 || res.fuse_type == 5) && (res.bc % BITS_PER_CHAR != 0)) { fprintf( stderr, "bc = %d is not divisible by BITS_PER_CHAR = %d. Bailing...!\n", res.bc, BITS_PER_CHAR); exit(-1); } res.datatype_in = datatype_in; res.datatype_out = datatype_out; res.datatype_comp = datatype_comp; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ memset( &unary_shape, 0, sizeof(libxsmm_meltw_unary_shape)); memset( &binary_shape, 0, sizeof(libxsmm_meltw_binary_shape)); /* Eltwise TPPs */ unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags); if ( res.all_zero_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd all_zero_kernel failed. Bailing...!\n"); exit(-1); } unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.helper_copy_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, unary_shape, unary_flags); if ( res.helper_copy_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd helper_copy_kernel failed. 
Bailing...!\n"); exit(-1); } binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ldo, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; res.helper_add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags); if ( res.helper_add_kernel == NULL) { fprintf( stderr, "JIT for TPP fwd helper_add_kernel failed. Bailing...!\n"); exit(-1); } if (res.fuse_type == 1 || res.fuse_type == 3 || res.fuse_type == 4 || res.fuse_type == 5) { unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, res.datatype_in, res.datatype_out, res.datatype_comp); unary_flags = ( (res.fuse_type == 4 || res.fuse_type == 5) ? LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE); res.inv_relu_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_RELU_INV, unary_shape, unary_flags); if ( res.inv_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd inv_relu_kernel failed. Bailing...!\n"); exit(-1); } } if (res.fuse_type == 2 || res.fuse_type == 3 || res.fuse_type == 5) { unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, res.datatype_in, res.datatype_out, res.datatype_comp); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; res.ewise_copy_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, unary_shape, unary_flags); if ( res.ewise_copy_kernel == NULL) { fprintf( stderr, "JIT for TPP bwd ewise_copy_kernel failed. Bailing...!\n"); exit(-1); } } /* TPP equations for dgamma, dbeta and din */ ld = bc; tmp_ld2 = 1; /* dgamma function */ my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn11; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags); unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn11; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, res.datatype_comp, unary_flags); binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[2].eqn_idx = my_eqn11; op_metadata[2].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, res.datatype_comp, binary_flags); ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT; op_metadata[3].eqn_idx = my_eqn11; op_metadata[3].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags); arg_metadata[0].eqn_idx = my_eqn11; arg_metadata[0].in_arg_pos = 0; arg_shape[0].m = res.bc; /* inp [HW, bc] */ arg_shape[0].n = res.H*res.W /res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = res.datatype_in; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn11; arg_metadata[1].in_arg_pos = 1; arg_shape[1].m = res.bc; /* a [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); arg_metadata[2].eqn_idx = my_eqn11; arg_metadata[2].in_arg_pos = 2; arg_shape[2].m = res.bc; /* b [bc] */ arg_shape[2].n = 1; 
arg_shape[2].ld = tmp_ld2; arg_shape[2].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr); arg_metadata[3].eqn_idx = my_eqn11; arg_metadata[3].in_arg_pos = 3; arg_shape[3].m = res.bc; /* dout [HW, bc] */ arg_shape[3].n = res.H*res.W/res.num_HW_blocks; arg_shape[3].ld = ld; arg_shape[3].type = res.datatype_out; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr); arg_metadata[4].eqn_idx = my_eqn11; arg_metadata[4].in_arg_pos = 4; arg_shape[4].m = res.bc; /* dgamma [bc] */ arg_shape[4].n = 1; arg_shape[4].ld = tmp_ld2; arg_shape[4].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* dgamma [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = res.datatype_comp; /* libxsmm_matrix_eqn_tree_print( my_eqn11 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn11 ); */ res.dgamma_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn11, eqn_out_arg_shape ); if ( res.dgamma_func == NULL) { fprintf( stderr, "JIT for TPP bwd dgamma_func (eqn11) failed. Bailing...!\n"); exit(-1); } /* dbeta function */ my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [bc] = dout [HW, bc] + dbeta [bc] */ binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; op_metadata[0].eqn_idx = my_eqn12; op_metadata[0].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags); /* dbeta_tmp [HW, bc] */ unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; op_metadata[1].eqn_idx = my_eqn12; op_metadata[1].op_arg_pos = -1; libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, res.datatype_comp, unary_flags); /* [HW, bc] -> [bc] */ arg_metadata[0].eqn_idx = my_eqn12; arg_metadata[0].in_arg_pos = 3; arg_shape[0].m = res.bc; /* dout [HW, bc] */ arg_shape[0].n = res.H*res.W /res.num_HW_blocks; arg_shape[0].ld = ld; arg_shape[0].type = res.datatype_out; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr); arg_metadata[1].eqn_idx = my_eqn12; arg_metadata[1].in_arg_pos = 5; arg_shape[1].m = res.bc; /* dbeta [bc] */ arg_shape[1].n = 1; arg_shape[1].ld = tmp_ld2; arg_shape[1].type = res.datatype_comp; libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr); eqn_out_arg_shape.m = res.bc; /* dbeta [bc] */ eqn_out_arg_shape.n = 1; eqn_out_arg_shape.ld = tmp_ld2; eqn_out_arg_shape.type = res.datatype_comp; /* libxsmm_matrix_eqn_tree_print( my_eqn12 ); */ /* libxsmm_matrix_eqn_rpn_print ( my_eqn12 ); */ res.dbeta_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn12, eqn_out_arg_shape ); if ( res.dbeta_func == NULL) { fprintf( stderr, "JIT for TPP bwd dbeta_func (eqn12) failed. 
Bailing...!\n");
    exit(-1);
  }

  /* din = gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (nhw*del_output_ptr[v] - (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v])) */
  /* din = gamma_ptr[v] * brstd_ptr[v] * del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v]) */
  /* din = gamma_ptr[v] * brstd_ptr[v] * del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * del_beta_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v] */
  /* din = a * del_output_ptr[v] + b * input_ptr[v] + c */
  /* a =  gamma_ptr[bc] * brstd_ptr[bc] */
  /* b = -gamma_ptr[bc] * del_gamma_ptr[bc] * brstd_ptr[bc] * brstd_ptr[bc] * recp_nhw */
  /* c = -gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * del_beta_ptr[bc] + gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * bmean_ptr[bc] * del_gamma_ptr[bc] * brstd_ptr[bc] */

  /* din long equation */
  my_eqn16 = libxsmm_matrix_eqn_create();  /* din = a * dout + (b * inp + c) */

  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[0].eqn_idx = my_eqn16;
  op_metadata[0].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);

  arg_metadata[0].eqn_idx = my_eqn16;
  arg_metadata[0].in_arg_pos = 1;
  arg_shape[0].m = res.bc;                           /* a [bc] */
  arg_shape[0].n = 1;
  arg_shape[0].ld = tmp_ld2;
  arg_shape[0].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr);

  arg_metadata[1].eqn_idx = my_eqn16;
  arg_metadata[1].in_arg_pos = 3;
  arg_shape[1].m = res.bc;                           /* dout [HW, bc] */
  arg_shape[1].n = res.H*res.W / res.num_HW_blocks;
  arg_shape[1].ld = ld;
  arg_shape[1].type = res.datatype_out;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr);

  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[1].eqn_idx = my_eqn16;
  op_metadata[1].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);

  arg_metadata[2].eqn_idx = my_eqn16;
  arg_metadata[2].in_arg_pos = 0;
  arg_shape[2].m = res.bc;                           /* inp [HW, bc] */
  arg_shape[2].n = res.H*res.W / res.num_HW_blocks;
  arg_shape[2].ld = ld;
  arg_shape[2].type = res.datatype_in;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr);

  arg_metadata[3].eqn_idx = my_eqn16;
  arg_metadata[3].in_arg_pos = 2;
  arg_shape[3].m = res.bc;                           /* b [bc] */
  arg_shape[3].n = 1;
  arg_shape[3].ld = tmp_ld2;
  arg_shape[3].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr);

  arg_metadata[4].eqn_idx = my_eqn16;
  arg_metadata[4].in_arg_pos = 7;
  arg_shape[4].m = res.bc;                           /* c [bc] */
  arg_shape[4].n = 1;
  arg_shape[4].ld = tmp_ld2;
  arg_shape[4].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr);

  eqn_out_arg_shape.m = res.bc;                      /* din [HW, bc] */
  eqn_out_arg_shape.n = res.H*res.W / res.num_HW_blocks;
  eqn_out_arg_shape.ld = ld;
  eqn_out_arg_shape.type = res.datatype_out;

  /* libxsmm_matrix_eqn_tree_print( my_eqn16 ); */
  /* libxsmm_matrix_eqn_rpn_print ( my_eqn16 ); */
  res.din_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn16, eqn_out_arg_shape );
  if ( res.din_func == NULL) {
    fprintf( stderr, "JIT for TPP bwd din_func (eqn16) failed. Bailing...!\n");
    exit(-1);
  }

  /* init scratch */
  dbeta_N_offset = LIBXSMM_UP2(res.CP * res.N * res.bc, 64);
  res.scratch_size = sizeof(float) * ( dbeta_N_offset /* dbeta_N */ + LIBXSMM_UP2(res.CP * res.N * res.bc, 64) /* dgamma_N */ );

  return res;
}

void destroy_my_bn_fwd(my_bn_fwd_config* cfg) {
  libxsmm_barrier_destroy(cfg->barrier);
  /* when/if libxsmm_matrix_eqn_destroy gets added, destructors for equations should go here */
}

void destroy_my_bn_bwd(my_bn_bwd_config* cfg) {
  libxsmm_barrier_destroy(cfg->barrier);
  /* when/if libxsmm_matrix_eqn_destroy gets added, destructors for equations should go here */
}

void my_bn_fwd_exec_f32( my_bn_fwd_config cfg, const float *pinp, const float *pinp_add,
                         const float *pgamma, const float *pbeta, float *mean, float *var,
                         float *pout, unsigned char *prelumask,
                         float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type ) {

  const libxsmm_blasint N  = cfg.N;
  const libxsmm_blasint CP = cfg.CP;
  const libxsmm_blasint HW = cfg.H * cfg.W;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks that could be run in parallel for 1d blocking */
  /* Question: should each thread take a number of full (length-CP) chunks, or can we really do a partial split here? */
  const libxsmm_blasint work_dN = CP * N;
  /* compute chunk size */
  const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN;
  const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN;

  /* number of tasks that could be run in parallel for 1d blocking */
  /* Question: should each thread take a number of full (length-CP) chunks, or can we really do a partial split here? */
  const libxsmm_blasint work_C = CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ? (work_C / cfg.threads) : ((work_C / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_C = (ltid * chunksize_C < work_C) ? (ltid * chunksize_C) : work_C;
  const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ?
((ltid + 1) * chunksize_C) : work_C; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, beta, pbeta, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, mean, mean, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, var, var, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(4, const float, inp_add, pinp_add, CP, HW, bc); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */ const float scale = 1.0f /((float)N * HW); LIBXSMM_VLA_DECL(3, float, sum_X_X2, ((float*)scratch), CP, bc); /* [2, CP, bc] */ LIBXSMM_ASSUME_ALIGNED(sum_X_X2_, 64); const libxsmm_blasint sum_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * 2 * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, sum_N, ((float*)scratch) + sum_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(sum_N_, 64); const libxsmm_blasint sumsq_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + sum_N_offset + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, sumsq_N, ((float*)scratch) + sumsq_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(sumsq_N_, 64); libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_meltw_unary_param reduce_HW_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_matrix_arg arg_array[6]; libxsmm_matrix_eqn_param eqn_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &add_param, 0, sizeof(add_param)); memset( &reduce_HW_param, 0, sizeof(reduce_HW_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &eqn_param, 0, sizeof(eqn_param)); LIBXSMM_ALIGNED(float s[bc], 64); LIBXSMM_ALIGNED(float b[bc], 64); int n, cp; int cpxnt; if (norm_type == MY_BN_FULL_NORM) { for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb; float *sum_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, n, 0, N, bc); float *sumsq_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, n, 0, N, bc); all_zero_param.out.primary = sum_ncp_ptr; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = sumsq_ncp_ptr; cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_ncp_ptr[cb] = 0.0f; */ /* sumsq_ncp_ptr[cb] = 0.0f; */ /* } */ LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*bc], 64); reduce_HW_param.out.primary = lcl_sum_X_X2; /* [2*bc] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); cfg.reduce_HW_kernel(&reduce_HW_param); /* [HW, bc] -----> [2 * bc] */ add_param.in0.primary = sum_ncp_ptr; add_param.in1.primary = lcl_sum_X_X2; add_param.out.primary = sum_ncp_ptr; cfg.helper_add_kernel(&add_param); add_param.in0.primary = sumsq_ncp_ptr; add_param.in1.primary = &lcl_sum_X_X2[bc]; add_param.out.primary = sumsq_ncp_ptr; cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_ncp_ptr[cb] += lcl_sum_X_X2[cb]; */ /* sumsq_ncp_ptr[cb] += lcl_sum_X_X2[bc + cb]; */ /* } */ } } libxsmm_barrier_wait(cfg.barrier, ltid); for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 
0, cp, 0, CP, bc); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_X_X2[cp*bc + cb] = 0.0f; */ /* sum_X_X2[CP*bc + (cp*bc + cb)] = 0.0f; */ /* } */ int cb, ni; for(ni = 0; ni < N; ni++){ add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc); cfg.helper_add_kernel(&add_param); add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_X_X2[cp*bc + cb] += sum_N[cp*N*bc + n*bc + cb]; */ /* sum_X_X2[CP*bc + (cp*bc + cb)] += sumsq_N[cp*N*bc + n*bc + cb]; */ /* } */ } for(cb = 0; cb < bc; cb++){ mean[cp*bc + cb] = (LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, cb, CP, bc)) * scale; /* E[X] */ var[cp*bc + cb] = ((LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, cb, CP, bc)) * scale) - (mean[cp*bc + cb]*mean[cp*bc + cb]); } } libxsmm_barrier_wait(cfg.barrier, ltid); } /* mean and var computation are for the full norm only */ for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; for(cb = 0; cb < bc; cb++){ float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); s[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* s = 1/sqrt(var(X) + eps) [bc] */ b[cb] = -1 * lmean * s[cb]; /* b = -E[X]/sqrt(var(X) + eps) [bc] */ /* s[cb] = 1.0f / ((float)sqrt(var[cp*bc + cb] + eps)); */ /* s = 1/sqrt(var(X) + eps) [bc] */ /* b[cb] = -1 * mean[cp*bc + cb] * s[cb]; */ /* b = -E[X]/sqrt(var(X) + eps) [bc] */ } arg_array[1].primary = s; /* [bc] */ arg_array[2].primary = b; /* [bc] */ arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); /* [bc] */ arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, bc); /* [bc] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { arg_array[5].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */ } if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { eqn_param.output.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? 
(void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (bc/BITS_PER_CHAR)) : NULL ); } cfg.func10(&eqn_param); /* Normalization equation + relu + eltwise -> y = relu( ((s*x + b)*gamma + beta) + inp_add) */ } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_bn_fwd_exec_bf16( my_bn_fwd_config cfg, const libxsmm_bfloat16 *pinp, const libxsmm_bfloat16 *pinp_add, const float *pgamma, const float *pbeta, float *mean, float *var, libxsmm_bfloat16 *pout, unsigned char *prelumask, float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type ) { const libxsmm_blasint N = cfg.N; const libxsmm_blasint CP = cfg.CP; const libxsmm_blasint HW = cfg.H * cfg.W; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_dN = CP * N; /* compute chunk size */ const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN; const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_C = CP; /* compute chunk size */ const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ? (work_C / cfg.threads) : ((work_C / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_C = (ltid * chunksize_C < work_C) ? (ltid * chunksize_C) : work_C; const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? 
((ltid + 1) * chunksize_C) : work_C; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp, pinp, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, out, pout, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, beta, pbeta, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, mean, mean, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, var, var, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp_add, pinp_add, CP, HW, bc); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */ libxsmm_meltw_binary_param add_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_matrix_arg arg_array[6]; libxsmm_matrix_eqn_param eqn_param; memset( &add_param, 0, sizeof(add_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &eqn_param, 0, sizeof(eqn_param)); LIBXSMM_ALIGNED(float s[bc], 64); LIBXSMM_ALIGNED(float b[bc], 64); int n, cp; int cpxnt; if (norm_type == MY_BN_FULL_NORM) { const float scale = 1.0f /((float)N * HW); LIBXSMM_VLA_DECL(3, float, sum_X_X2, ((float*)scratch), CP, bc); /* [2, CP, bc] */ LIBXSMM_ASSUME_ALIGNED(sum_X_X2_, 64); const libxsmm_blasint sum_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * 2 * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, sum_N, ((float*)scratch) + sum_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(sum_N_, 64); const libxsmm_blasint sumsq_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + sum_N_offset + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, sumsq_N, ((float*)scratch) + sumsq_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(sumsq_N_, 64); libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_unary_param reduce_HW_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &reduce_HW_param, 0, sizeof(reduce_HW_param)); for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb; float *sum_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, n, 0, N, bc); float *sumsq_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, n, 0, N, bc); all_zero_param.out.primary = sum_ncp_ptr; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = sumsq_ncp_ptr; cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_ncp_ptr[cb] = 0.0f; */ /* sumsq_ncp_ptr[cb] = 0.0f; */ /* } */ LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*bc], 64); reduce_HW_param.out.primary = lcl_sum_X_X2; /* [2*bc] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); cfg.reduce_HW_kernel(&reduce_HW_param); /* [HW, bc] -----> [2 * bc] */ add_param.in0.primary = sum_ncp_ptr; add_param.in1.primary = lcl_sum_X_X2; add_param.out.primary = sum_ncp_ptr; cfg.helper_add_kernel(&add_param); add_param.in0.primary = sumsq_ncp_ptr; add_param.in1.primary = &lcl_sum_X_X2[bc]; add_param.out.primary = sumsq_ncp_ptr; cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_ncp_ptr[cb] += lcl_sum_X_X2[cb]; */ /* sumsq_ncp_ptr[cb] += lcl_sum_X_X2[bc + cb]; */ /* } */ } } libxsmm_barrier_wait(cfg.barrier, ltid); for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = 
&LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_X_X2[cp*bc + cb] = 0.0f; */ /* sum_X_X2[CP*bc + (cp*bc + cb)] = 0.0f; */ /* } */ int cb, ni; for(ni = 0; ni < N; ni++){ add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc); cfg.helper_add_kernel(&add_param); add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc); cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* sum_X_X2[cp*bc + cb] += sum_N[cp*N*bc + n*bc + cb]; */ /* sum_X_X2[CP*bc + (cp*bc + cb)] += sumsq_N[cp*N*bc + n*bc + cb]; */ /* } */ } for(cb = 0; cb < bc; cb++){ mean[cp*bc + cb] = (LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, cb, CP, bc)) * scale; /* E[X] */ var[cp*bc + cb] = ((LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, cb, CP, bc)) * scale) - (mean[cp*bc + cb]*mean[cp*bc + cb]); } } libxsmm_barrier_wait(cfg.barrier, ltid); } /* mean and var computation are for the full norm only */ for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; for(cb = 0; cb < bc; cb++){ float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); s[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* s = 1/sqrt(var(X) + eps) [bc] */ b[cb] = -1 * lmean * s[cb]; /* b = -E[X]/sqrt(var(X) + eps) [bc] */ /* s[cb] = 1.0f / ((float)sqrt(var[cp*bc + cb] + eps)); */ /* s = 1/sqrt(var(X) + eps) [bc] */ /* b[cb] = -1 * mean[cp*bc + cb] * s[cb]; */ /* b = -E[X]/sqrt(var(X) + eps) [bc] */ } arg_array[1].primary = s; /* [bc] */ arg_array[2].primary = b; /* [bc] */ arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); /* [bc] */ arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, bc); /* [bc] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { arg_array[5].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */ } if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { eqn_param.output.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? 
(void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (bc/BITS_PER_CHAR)) : NULL ); } cfg.func10(&eqn_param); /* Normalization equation + relu + eltwise -> y = relu( ((s*x + b)*gamma + beta) + inp_add) */ } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_bn_bwd_exec_f32( my_bn_bwd_config cfg, float *pdout, const float *pinp, const float *mean, const float *var, const float *pgamma, const unsigned char *prelumask, float *pdin, float *pdin_add, float *pdgamma, float *pdbeta, float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type) { const libxsmm_blasint N = cfg.N; const libxsmm_blasint CP = cfg.CP; const libxsmm_blasint HW = cfg.H * cfg.W; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_dN = N * CP; /* compute chunk size */ const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_dN = ( ltid * chunksize_dN < work_dN) ? ( ltid * chunksize_dN) : work_dN; const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_C = CP; /* compute chunk size */ const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ? (work_C / cfg.threads) : ((work_C / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_C = ( ltid * chunksize_C < work_C) ? ( ltid * chunksize_C) : work_C; const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? 
((ltid + 1) * chunksize_C) : work_C; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); const float scale = 1.0f / ((float)N*HW); /* Scaling parameter*/ LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, mean, mean, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, var, var, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(4, float, din_add, pdin_add, CP, HW, bc); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, const unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */ const libxsmm_blasint dbeta_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, dgamma_N, ((float*)scratch), N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(dgamma_N_, 64); LIBXSMM_VLA_DECL(3, float, dbeta_N, ((float*)scratch) + dbeta_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(dbeta_N_, 64); libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_meltw_unary_param copy_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_meltw_unary_param ewise_copy_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &add_param, 0, sizeof(add_param)); memset( &copy_param, 0, sizeof(copy_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &ewise_copy_param, 0, sizeof(ewise_copy_param)); libxsmm_matrix_arg arg_array[8]; libxsmm_matrix_eqn_param eqn_param; memset( &eqn_param, 0, sizeof(eqn_param)); LIBXSMM_ALIGNED(float a[bc], 64); /* could also get moved into the scratch but left on the private stack as these are small, same below */ LIBXSMM_ALIGNED(float b[bc], 64); LIBXSMM_ALIGNED(float c[bc], 64); int n, cp; int cpxnt; if (norm_type == MY_BN_FULL_NORM) { for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; LIBXSMM_ALIGNED(float lcl_dgamma_ptr[bc], 64); LIBXSMM_ALIGNED(float lcl_dbeta_ptr[bc], 64); float *dgamma_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, n, 0, N, bc); float *dbeta_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, n, 0, N, bc); all_zero_param.out.primary = lcl_dgamma_ptr; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = lcl_dbeta_ptr; cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* lcl_dgamma_ptr[cb] = 0.0f; */ /* lcl_dbeta_ptr[cb] = 0.0f; */ /* } */ for(cb = 0; cb < bc; cb++){ float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); a[cb] = 1.0f / ((float)sqrt(lvar + eps)); b[cb] = -a[cb] * lmean; /* a[cb] = 1.0f / ((float)sqrt(var[cp*bc + cb] + eps)); */ /* b[cb] = -a[cb]*mean[cp*bc + cb]; */ } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[4].primary = lcl_dgamma_ptr; arg_array[5].primary = lcl_dbeta_ptr; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); for(hwb=0; hwb < num_HW_blocks; hwb++){ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { if 
(cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ all_relu_param.in.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? (void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc/8) : NULL /*&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc) */ ); /* dout_fwd ? nonsense? */ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ cfg.inv_relu_kernel(&all_relu_param); } /* ReLU/mask */ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); cfg.ewise_copy_kernel(&ewise_copy_param); } /* Eltwise */ } arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); eqn_param.inputs = arg_array; eqn_param.output.primary = lcl_dgamma_ptr; cfg.dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */ eqn_param.output.primary = lcl_dbeta_ptr; cfg.dbeta_func(&eqn_param); /* dbeta += dout */ } copy_param.in.primary = lcl_dgamma_ptr; copy_param.out.primary = dgamma_ncp_ptr; cfg.helper_copy_kernel(&copy_param); copy_param.in.primary = lcl_dbeta_ptr; copy_param.out.primary = dbeta_ncp_ptr; cfg.helper_copy_kernel(&copy_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* dgamma_ncp_ptr[cb] = lcl_dgamma_ptr[cb]; */ /* dbeta_ncp_ptr[cb] = lcl_dbeta_ptr[cb]; */ /* } */ } libxsmm_barrier_wait(cfg.barrier, ltid); for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* pdgamma[cp*bc + cb] = 0.0f; */ /* pdbeta[cp*bc + cb] = 0.0f; */ /* } */ int ni; for(ni = 0; ni < N; ni++){ add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); cfg.helper_add_kernel(&add_param); add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* pdgamma[cp*bc + cb] += dgamma_N[cp*N*bc + n*bc + cb]; */ /* pdbeta[cp*bc + cb] += dbeta_N[cp*N*bc + n*bc + cb]; */ /* } */ } } libxsmm_barrier_wait(cfg.barrier, ltid); } /* this is only computed in case of full backward (norm_type ~ 0) */ for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; for(cb = 0; cb < bc; cb++){ float lgamma = LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, bc); float ldgamma = 
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, bc); float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); float ldbeta = LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, bc); a[cb] = lgamma / ((float)sqrt(lvar + eps)); /* a = gamma_ptr[bc] * brstd_ptr[bc] */ b[cb] = -a[cb] * scale * ldgamma / ((float)sqrt(lvar + eps)); /* b = gamma_ptr[bc] * brstd_ptr[bc] * del_gamma_ptr[v] * brstd_ptr[bc] * recp_nhw */ c[cb] = -b[cb] * lmean - a[cb] * scale * ldbeta ; /* c = -gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * del_beta_ptr[bc] + gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * bmean_ptr[bc] * del_gamma_ptr[bc] * brstd_ptr[bc]) */ } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); arg_array[7].primary = c; for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); cfg.din_func(&eqn_param); /* din = dout * a + b * inp + c */ } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_bn_bwd_exec_bf16( my_bn_bwd_config cfg, libxsmm_bfloat16 *pdout, const libxsmm_bfloat16 *pinp, const float *mean, const float *var, const float *pgamma, const unsigned char *prelumask, libxsmm_bfloat16 *pdin, libxsmm_bfloat16 *pdin_add, float *pdgamma, float *pdbeta, float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type) { const libxsmm_blasint N = cfg.N; const libxsmm_blasint CP = cfg.CP; const libxsmm_blasint HW = cfg.H * cfg.W; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_dN = N * CP; /* compute chunk size */ const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ? (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_dN = ( ltid * chunksize_dN < work_dN) ? ( ltid * chunksize_dN) : work_dN; const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN; /* number of tasks that could be run in parallel for 1d blocking */ /* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */ const libxsmm_blasint work_C = CP; /* compute chunk size */ const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ? (work_C / cfg.threads) : ((work_C / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin_C = ( ltid * chunksize_C < work_C) ? ( ltid * chunksize_C) : work_C; const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? 
((ltid + 1) * chunksize_C) : work_C; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); const float scale = 1.0f / ((float)N*HW); /* Scaling parameter*/ LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din, pdin, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp, pinp, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dout, pdout, CP, HW, bc); /* [N, CP, HW, bc] */ LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, mean, mean, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, const float, var, var, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, bc); /* [CP, bc] */ LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din_add, pdin_add, CP, HW, bc); /* [N, CP, HW, bc] */ float alpha = 0.0f; LIBXSMM_VLA_DECL(4, const unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */ libxsmm_matrix_arg arg_array[8]; libxsmm_matrix_eqn_param eqn_param; memset( &eqn_param, 0, sizeof(eqn_param)); LIBXSMM_ALIGNED(float a[bc], 64); /* could also get moved into the scratch but left on the private stack as these are small, same below */ LIBXSMM_ALIGNED(float b[bc], 64); LIBXSMM_ALIGNED(float c[bc], 64); int n, cp; int cpxnt; if (norm_type == MY_BN_FULL_NORM) { const libxsmm_blasint dbeta_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float); LIBXSMM_VLA_DECL(3, float, dgamma_N, ((float*)scratch), N, bc); /* [CP, N, bc] */ LIBXSMM_VLA_DECL(3, float, dbeta_N, ((float*)scratch) + dbeta_N_offset, N, bc); /* [CP, N, bc] */ LIBXSMM_ASSUME_ALIGNED(dgamma_N_, 64); LIBXSMM_ASSUME_ALIGNED(dbeta_N_, 64); libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_meltw_unary_param copy_param; libxsmm_meltw_unary_param all_relu_param; libxsmm_meltw_unary_param ewise_copy_param; memset( &all_zero_param, 0, sizeof(all_zero_param)); memset( &add_param, 0, sizeof(add_param)); memset( &copy_param, 0, sizeof(copy_param)); memset( &all_relu_param, 0, sizeof(all_relu_param)); memset( &ewise_copy_param, 0, sizeof(ewise_copy_param)); for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; LIBXSMM_ALIGNED(float lcl_dgamma_ptr[bc], 64); LIBXSMM_ALIGNED(float lcl_dbeta_ptr[bc], 64); float *dgamma_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, n, 0, N, bc); float *dbeta_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, n, 0, N, bc); all_zero_param.out.primary = lcl_dgamma_ptr; cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = lcl_dbeta_ptr; cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (cb = 0; cb < bc; cb++) { */ /* lcl_dgamma_ptr[cb] = 0.0f; */ /* lcl_dbeta_ptr[cb] = 0.0f; */ /* } */ for(cb = 0; cb < bc; cb++){ float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); a[cb] = 1.0f / ((float)sqrt(lvar + eps)); b[cb] = -a[cb] * lmean; /* a[cb] = 1.0f / ((float)sqrt(var[cp*bc + cb] + eps)); */ /* b[cb] = -a[cb]*mean[cp*bc + cb]; */ } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[4].primary = lcl_dgamma_ptr; arg_array[5].primary = lcl_dbeta_ptr; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); for(hwb=0; hwb < num_HW_blocks; hwb++){ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == 
MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { all_relu_param.op.primary = (void*)(&alpha); all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ all_relu_param.in.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? (void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc/8) : NULL /*&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc) */ ); /* dout_fwd ? nonsense? */ all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */ cfg.inv_relu_kernel(&all_relu_param); } /* ReLU/mask */ if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) { ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); cfg.ewise_copy_kernel(&ewise_copy_param); } /* Eltwise */ } arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); eqn_param.inputs = arg_array; eqn_param.output.primary = lcl_dgamma_ptr; cfg.dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */ eqn_param.output.primary = lcl_dbeta_ptr; cfg.dbeta_func(&eqn_param); /* dbeta += dout */ } copy_param.in.primary = lcl_dgamma_ptr; copy_param.out.primary = dgamma_ncp_ptr; cfg.helper_copy_kernel(&copy_param); copy_param.in.primary = lcl_dbeta_ptr; copy_param.out.primary = dbeta_ncp_ptr; cfg.helper_copy_kernel(&copy_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* dgamma_ncp_ptr[cb] = lcl_dgamma_ptr[cb]; */ /* dbeta_ncp_ptr[cb] = lcl_dbeta_ptr[cb]; */ /* } */ } libxsmm_barrier_wait(cfg.barrier, ltid); for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) { all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); cfg.all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); cfg.all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* pdgamma[cp*bc + cb] = 0.0f; */ /* pdbeta[cp*bc + cb] = 0.0f; */ /* } */ int ni; for(ni = 0; ni < N; ni++){ add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc); cfg.helper_add_kernel(&add_param); add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, ni, 0, N, bc); add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc); cfg.helper_add_kernel(&add_param); /* #pragma omp simd */ /* for (int cb = 0; cb < bc; cb++) { */ /* pdgamma[cp*bc + cb] += dgamma_N[cp*N*bc + n*bc + cb]; */ /* pdbeta[cp*bc + cb] += dbeta_N[cp*N*bc + n*bc + cb]; */ /* } */ } } libxsmm_barrier_wait(cfg.barrier, ltid); } /* this is only computed in case of full backward (norm_type ~ 0) */ for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) { n = cpxnt%N; cp = cpxnt/N; int hwb, cb; for(cb = 0; cb < bc; cb++){ float lgamma = LIBXSMM_VLA_ACCESS(2, 
gamma, cp, cb, bc); float ldgamma = LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, bc); float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc); float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc); float ldbeta = LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, bc); a[cb] = lgamma / ((float)sqrt(lvar + eps)); /* a = gamma_ptr[bc] * brstd_ptr[bc] */ b[cb] = -a[cb] * scale * ldgamma / ((float)sqrt(lvar + eps)); /* b = gamma_ptr[bc] * brstd_ptr[bc] * del_gamma_ptr[v] * brstd_ptr[bc] * recp_nhw */ c[cb] = -b[cb] * lmean - a[cb] * scale * ldbeta ; /* c = -gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * del_beta_ptr[bc] + gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * bmean_ptr[bc] * del_gamma_ptr[bc] * brstd_ptr[bc]) */ } arg_array[1].primary = a; arg_array[2].primary = b; arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); arg_array[7].primary = c; for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); eqn_param.output.primary = (void*)&LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);; eqn_param.inputs = arg_array; cfg.din_func(&eqn_param); /* din = dout * a + b * inp + c */ } } libxsmm_barrier_wait(cfg.barrier, ltid); }
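/* A minimal scalar sketch (not part of the original sources; names are illustrative)
   of the per-element math that the JITed TPP equations above implement; recp_nhw
   corresponds to the `scale` = 1/(N*HW) used in the exec functions. */
#include <math.h>

/* eqn11/eqn12: dgamma += (a*inp + b) * dout and dbeta += dout,
   with a = 1/sqrt(var + eps) and b = -mean * a, as in the bwd exec loops. */
static void bn_bwd_dgamma_dbeta_ref(float inp, float dout, float mean, float var,
                                    float eps, float *dgamma, float *dbeta)
{
  const float a = 1.0f / sqrtf(var + eps);
  const float b = -a * mean;
  *dgamma += (a * inp + b) * dout;
  *dbeta  += dout;
}

/* eqn16: din = a*dout + b*inp + c, with the coefficients from the comment chain
   that precedes the eqn16 construction. */
static float bn_bwd_din_ref(float inp, float dout, float gamma, float mean,
                            float var, float dgamma, float dbeta,
                            float recp_nhw, float eps)
{
  const float rstd = 1.0f / sqrtf(var + eps);        /* brstd */
  const float a = gamma * rstd;                      /* gamma * brstd */
  const float b = -a * recp_nhw * dgamma * rstd;     /* -gamma * dgamma * brstd^2 * recp_nhw */
  const float c = -b * mean - a * recp_nhw * dbeta;  /* folds the mean and dbeta terms */
  return a * dout + b * inp + c;
}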
add-brisbane.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB SP code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header-brisbane.h"

//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add()
{
  int i, j, k, m;

  size_t kernel_add_0_off[3] = { 1, 1, 1 };
  size_t kernel_add_0_idx[3] = { nx2, ny2, nz2 };
  brisbane_kernel kernel_add_0;
  brisbane_kernel_create("add_0", &kernel_add_0);
  brisbane_kernel_setmem(kernel_add_0, 0, mem_u, brisbane_rw);
  brisbane_kernel_setmem(kernel_add_0, 1, mem_rhs, brisbane_rd);

  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_kernel(task0, kernel_add_0, 3, kernel_add_0_off, kernel_add_0_idx);
  brisbane_task_submit(task0, brisbane_default, NULL, true);

#if 0
#ifdef SPEC_USE_INNER_SIMD
  #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2)
#else
  #pragma omp target teams distribute parallel for simd collapse(4)
#endif
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
      #pragma omp simd
#endif
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 5; m++) {
          u[m][k][j][i] = u[m][k][j][i] + rhs[m][k][j][i];
        }
      }
    }
  }
#endif
}
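/* The device kernel "add_0" dispatched above is defined elsewhere; as a sketch,
   a hypothetical host-side reference of what it is expected to compute for one
   work-item (i, j, k) of the {nx2, ny2, nz2} index space with offset {1, 1, 1},
   the same update as the disabled OpenMP loop. Array extents are illustrative
   (C99 variable-length array parameters). */
static void add_0_reference(int nx2, int ny2, int nz2,
                            double u[5][nz2 + 2][ny2 + 2][nx2 + 2],
                            const double rhs[5][nz2 + 2][ny2 + 2][nx2 + 2],
                            int i, int j, int k) /* 1 <= i <= nx2, etc. */
{
  int m;
  for (m = 0; m < 5; m++)
    u[m][k][j][i] += rhs[m][k][j][i];
}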
private-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

int main(void)
{
  int i, n = 7;
  int a[n], suma = 50;

  for (i = 0; i < n; i++) a[i] = i;

  #pragma omp parallel
  {
    /* suma is shared here, so the updates below race between threads; that is
       the behavior this example demonstrates (a private clause or a reduction
       would be needed for a deterministic result). */
    suma = 0;
    #pragma omp for
    for (i = 0; i < n; i++) {
      suma = suma + a[i];
      printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
    }
    printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
  }
  printf("\n");
  printf("suma=%d \n", suma);
  return 0;
}
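/* A race-free variant of the sum above, as a minimal sketch: reduction(+:suma)
   gives each thread a private partial sum and combines them when the loop ends,
   so the printed total is deterministic. The function name is hypothetical, to
   avoid clashing with main() above. */
#include <stdio.h>

int main_reduction_demo(void)
{
  int i, n = 7, suma = 0;
  int a[7];
  for (i = 0; i < n; i++) a[i] = i;
  /* each thread accumulates a private copy of suma; OpenMP adds them at the end */
  #pragma omp parallel for reduction(+:suma)
  for (i = 0; i < n; i++)
    suma += a[i];
  printf("suma=%d\n", suma); /* always 21 = 0+1+...+6 */
  return 0;
}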
mxEvaluate.c
#include "../../SWEAbstractNumFluxSolver1d/private/SWENumFlux1d.h" void evaluateHLLFormula(const double hmin, ///< water threshold const double gra, ///< gravity acceleration const double hM, ///< local water depth const double qnM, ///< local flux variable const double hP, ///< adjacent water depth const double qnP, ///< adjacent flux double *Fhn, ///< numerical flux term double *Fqxn ///< numerical flux term ) { // double FhM, FqnM, FhP, FqnP; double Em[2], Ep[2]; evaluateFluxTerm1d(hmin, gra, hM, qnM, Em); evaluateFluxTerm1d(hmin, gra, hP, qnP, Ep); /* calculation of wave speed */ double sM, sP; if ((hM > hmin) & (hP > hmin)) { double unM = qnM / hM; double unP = qnP / hP; double us = (0.5 * (unM + unP) + sqrt(gra * hM) - sqrt(gra * hP)); double cs = (0.5 * (sqrt(gra * hM) + sqrt(gra * hP)) + 0.25 * (unM - unP)); sM = min(unM - sqrt(gra * hM), us - cs); sP = max(unP + sqrt(gra * hP), us + cs); } else if ((hM > hmin) & (hP <= hmin)) { double unM = qnM / hM; sM = (unM - sqrt(gra * hM)); sP = (unM + 2 * sqrt(gra * hM)); } else if ((hM <= hmin) & (hP > hmin)) { double unP = qnP / hP; sM = (unP - 2 * sqrt(gra * hP)); sP = (unP + sqrt(gra * hP)); } else { /* both dry element */ sM = 0; sP = 0; } /* HLL function */ if ((sM >= 0) & (sP > 0)) { *Fhn = Em[0]; *Fqxn = Em[1]; } else if ((sM < 0) & (sP > 0)) { *Fhn = (sP * Em[0] - sM * Ep[0] + sM * sP * (hP - hM)) / (sP - sM); *Fqxn = (sP * Em[1] - sM * Ep[1] + sM * sP * (qnP - qnM)) / (sP - sM); } else if ((sM < 0) & (sP <= 0)) { *Fhn = Ep[0]; *Fqxn = Ep[1]; } else if ((sM == 0) & (sP == 0)) { *Fhn = 0; *Fqxn = 0; } else { mexPrintf("Matlab:%s:ErrWaveSpeed\n", __FILE__); mexPrintf("The wave speed computation occurs an error."); } return; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { FluxSolver solver = ConvertInputMexVariable1d(nlhs, nrhs, prhs); const size_t NdimOut = 3; const mwSize dimOut[3] = {solver.TNfp, solver.K, 2}; plhs[0] = mxCreateNumericArray(NdimOut, dimOut, mxDOUBLE_CLASS, mxREAL); double *Fh = mxGetPr(plhs[0]); double *Fqx = Fh + solver.TNfp * solver.K; #ifdef _OPENMP #pragma omp parallel for num_threads(DG_THREADS) #endif for (int k = 0; k < solver.K; k++) { for (int n = 0; n < solver.TNfp; n++) { const size_t sk = k * solver.TNfp + n; const double nx = solver.nx[sk]; double qnM, qnP; RotateFluxToNormal1d(solver.huM[sk], nx, &qnM); RotateFluxToNormal1d(solver.huP[sk], nx, &qnP); double Fqns; evaluateHLLFormula(solver.hmin, solver.gra, solver.hM[sk], qnM, solver.hP[sk], qnP, Fh + sk, &Fqns); RotateNormalFluxToCoordinate1d(Fqns, nx, Fqx + sk); } } return; }
fs_csc_inspector.h
#include <vector>
#include <set>
#include <cassert>

// Makes an edge inside the dependence graph
inline void connect(int v, int w, std::vector<std::vector<int>> &DAG) {
  DAG[v].push_back(w);
}

/* ****** Inspector for level-set parallelization of Forward Solve CSC's outermost loop */
void fs_csc_inspector(int n, int *Lp, int *Li, std::vector<std::vector<int>> &DAG) {
  // Inspector: iteration In_2 writes only to DAG[In_2], so the loop is race-free.
  #pragma omp parallel for schedule(auto)
  for (int In_2 = 0; In_2 < n; In_2++) {
    for (int In_4 = Lp[In_2]; In_4 < Lp[In_2 + 1]; In_4++) {
      if (In_2 < Li[In_4]) {
        int Out_2 = Li[In_4];
        connect(In_2, Out_2, DAG);
      }
    }
  }
}

/* ****** Inspector for level-set parallelization of Forward Solve CSC's outermost loop
   (std::set variant, which deduplicates repeated edges) */
void fs_csc_inspector(int n, int *Lp, int *Li, std::vector<std::set<int>> &DAG) {
  // Indices declared inside the loop are private to each thread by default,
  // so no explicit private clause is needed.
  #pragma omp parallel for schedule(static)
  for (int In_2 = 0; In_2 < n; In_2++) {
    for (int In_4 = Lp[In_2]; In_4 < Lp[In_2 + 1]; In_4++) {
      if (In_2 < Li[In_4]) {
        DAG[In_2].insert(Li[In_4]);
      }
    }
  }
}
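/* Once the DAG is built, a level-set executor groups columns by dependence depth:
   columns sharing a level have no edges between them and can be solved in parallel.
   A sketch (hypothetical helper, not part of the original header) that computes
   levels directly from the CSC pattern; it is sequential by construction, since
   level[j] must be final before it propagates. */
void fs_csc_level_sets(int n, const int *Lp, const int *Li, int *level)
{
  // level[j] = length of the longest dependence chain ending at column j
  for (int j = 0; j < n; j++) level[j] = 0;
  // Every edge built by the inspector goes from a column to a larger row index,
  // so visiting columns in ascending order finalizes level[j] before it is read.
  for (int j = 0; j < n; j++) {
    for (int p = Lp[j]; p < Lp[j + 1]; p++) {
      const int r = Li[p];
      if (r > j && level[r] < level[j] + 1)
        level[r] = level[j] + 1;
    }
  }
}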
shape.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include "../dll.h" #include "../nd4jmalloc.h" #include "../templatemath.h" #include "../helpers/logger.h" #include "../pointercast.h" #include "../cnpy/cnpy.h" #include <op_boilerplate.h> #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_SHAPEINFOLENGTH 2*MAX_RANK+4 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include <helpers/sharedmem.h> #endif #ifdef __CUDACC__ #define INLINEDEF inline #else #define INLINEDEF inline #endif #include "../pairwise_util.h" #include <stdint.h> #include <array/ArrayOptions.h> namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ND4J_EXPORT ShapeInformation { _CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} Nd4jLong *shape; Nd4jLong *stride; char order; int rank; int offset; int elementWiseStride; }; /** * Indexing information * for bounds checking */ struct ND4J_EXPORT CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; ND4J_EXPORT _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2); ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim); template <typename T> ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length); ND4J_EXPORT _CUDA_HD void traceNew(int id); ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength); ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* 
newShape, bool isFOrder); ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder, Nd4jLong* target); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output); //ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer); #ifdef __CUDACC__ template <typename T> __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager); __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret); ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order); // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * @param toCopy the shape to copy * @return a copy of the original struct */ ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy); ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo); /** * copy-past from java hasDefaultStridesForShape function * check whether array is not permuted and has contiguous elements in memory */ ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer); /** * * @param length * @param shape * @param rearrange * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange); /** * In place permute swap * @param length * @param shape * @param rearrange */ ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange); ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. 
 */
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension, int dimensionLength);

ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension, int dimensionLength);

/**
 * This method does an in-place transpose of the given shape buffer.
 *
 * @param shapeBuffer
 */
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);

/**
 * Get the ordering for the device
 * @param length
 * @param shape
 * @param stride
 * @param elementStride
 * @return
 */
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);

/**
 * Ensure that every value in the rearrange
 * array is unique
 * @param arr
 * @param shape
 * @param arrLength
 * @param shapeLength
 * @return
 */
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);

/**
 * Permute the shape information
 * @param info the shape information to permute
 * @param rearrange the order to rearrange
 * @param rank the rank of the rearrange array
 */
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);

/**
 * Returns whether the
 * given shape is a vector or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);

/**
 * When 1 dimension is the whole length of the
 * array
 */
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);

ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);

ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);

/**
 * Returns whether the
 * given shape is a matrix or not
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);

INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);

/**
 * Returns the shape portion of an information
 * buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);

template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);

/**
 * Copy 'length' elements from 'from' into 'to'.
 * Unlike copyOf, no memory is allocated.
 */
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);

/**
 * Copy 'length' elements into 'to', gathering them
 * from 'from' through the given 'indexes' (to[i] = from[indexes[i]]).
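 *
 * For example (values illustrative only):
 *
 *     Nd4jLong from[]    = {10, 20, 30};
 *     Nd4jLong to[2];
 *     Nd4jLong indexes[] = {2, 0};
 *     shape::copyTo(2, from, to, indexes);   // to == {30, 10}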
 */
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);

ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);

ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);

ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);

ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo);

ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);

ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);

/**
 * Returns the rank portion of
 * an information buffer
 */
ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *buffer);

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);

/**
 * Returns the stride portion of an information
 * buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);

/**
 * Returns whether the shape info buffer is marked as empty
 */
ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo);

/**
 * Compute the length of the given shape
 */
ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);

ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);

/***
 * Returns the offset portion of an information buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);

ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);

/**
 * Returns the ordering
 * for this shape information buffer
 */
ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer);

/**
 * Returns the element wise stride for this information
 * buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer);

/**
 * Returns the element wise stride for this information
 * buffer, relative to a dimension and ordering for a reduction index
 */
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);

/**
 * Copy the data into 'out' with the
 * given indexes omitted
 *
 * @param data the data to copy
 * @param indexes the indexes of the items to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 */
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void
removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);

/**
 * Return a copy of this array with the
 * given indexes omitted
 *
 * @param data the data to copy
 * @param indexes the indexes of the items to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the
 * omitted items
 */
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);

/**
 * Iterate over a given set of indexes;
 * the begin and end indexes are 0 based.
 * A padding of 1 is automatically assumed for the end,
 * so iterating from 0 to 4 goes to 4 rather than 3.
 *
 * 'indexes' are the indexes to exclude,
 * 'indexesLength' is the length of that array.
 */
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes, int indexesLength, int begin, int end);

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
//#ifdef __CUDACC__
//    __device__
//#endif
//    ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);

/**
 * Returns a shape which
 * forces the given shape to have length 2 (a vector shape).
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);

ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();

ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);

/**
 * Generate a buffer of values
 * up to the given length
 * at the specified increment
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);

/**
 * Range between from and to with an
 * increment of 1
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);

/**
 * Keep the given indexes
 * in the data
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);

/**
 * Generate a reverse copy of the data
 * @param data
 * @param length
 * @return
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);

template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);

template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);

template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);

/**
 * Concatenate two arrays.
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);

/**
 * Concatenate multiple arrays.
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return the length per slice of the given shape
 * along the given dimension
 */
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
ND4J_EXPORT _CUDA_HD
Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ // ND4J_EXPORT _CUDA_HD int offset(int index, // int rank, // shape::ShapeInformation *info, // Nd4jLong *dimension, // int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads); // ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension, // int dimensionLength); /** * Returns a shape buffer * for the shape information metadata. */ ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info); ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret); /** * Returns the number of elements per thread */ //#ifdef __CUDACC__ // __device__ //#endif // int numElementsPerThread(int N); /** * Returns the block starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int blockStartingIndex(int N); /** * Returns the thread starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadStartingIndex(int N, int stride, int offset); /** * Returns the thread ending index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadEndingIndex(int N, int stride, int offset); /** * Returns indexing information * for the current kernel invocation */ //#ifdef __CUDACC__ // __device__ //#endif // CurrentIndexing *currentIndex(int N, int offset, int stride); /** Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad); /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal); /** * Computes the number of tads * per reduce index for the * reduction tad. 
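 *
 * E.g. if the original problem yields 6 tads and the reduced problem has 2,
 * each reduce index covers 6 / 2 = 3 tads (an illustration of the intent;
 * see reductionIndexForTad above).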
*/ ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal); /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum); /** * Returns the prod of the data * up to the given length */ ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length); ND4J_EXPORT _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length); /** * Returns the rear most left over item not present in * the dimension array. This assumes that the dimension array is sorted. * * For example, given a dimension array of: * 0,2 * * and * * 12,4,2,1 in data * * You end up with 1 (data[3]) * since the first item won't match * the last item of the dimension array */ // ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength); /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, const Nd4jLong *indices,int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. 
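 *
 * These routines count in Fortran (column-major) order; the ind2subC family
 * below is the row-major counterpart. Illustrative values:
 *
 *     Nd4jLong shape[] = {2, 3};
 *     Nd4jLong out[2];
 *     shape::ind2sub(2, shape, 4, out);    // out == {0, 2}
 *     shape::ind2subC(2, shape, 4, out);   // out == {1, 1}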
* * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ ND4J_EXPORT _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices); /** * increment n-dimensional array by one iteration by changing coord appropriately * for example we have array with shape {2, 3}: * - if input coord = {0,1}, then output coord = {0,2} * - if input coord = {0,2}, then output coord = {1,0} * so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2} */ /* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1} * arrLen - array length */ ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen); ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order); /** * Compute the real linear indices for the given shape and stride */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride); /** * Compute the real linear indices for the * given shape buffer. 
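 * (Illustrative values: for a 2x2 buffer with f-order strides {1, 2} this
 * returns {0, 1, 2, 3}; for c-order strides {2, 1} it returns {0, 2, 1, 3}.)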
Shape, stride and rank are derived
 * from the buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer);

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape)
 * @return the mapped indexes along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);

/**
 * Convert a linear index to
 * the equivalent nd index;
 * infers the number of indices from the shape info.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out);

ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo);

ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);

ND4J_EXPORT _CUDA_HD void printIntArray(Nd4jLong *arr, int length);

ND4J_EXPORT _CUDA_HD void printArray(float *arr, int length);

template<typename T>
ND4J_EXPORT _CUDA_HD void printArray(T *arr, int length, const char *message);

ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape, bool fortranOrder);

ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);

// this function checks the consistency of the dimensions with the array rank (negative dimensions,
// too-large dimensions, too many dimensions); it also sorts the input array of dimensions,
// which is likewise necessary for creating a TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);

// returns the absolute index into the min array (min is a sub-array of max) that corresponds to index maxIdx of the max array
ND4J_EXPORT _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx);

ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order);

//END HEADERS


//BEGIN IMPLEMENTATIONS

#ifdef __CUDACC__
template <typename T>
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
    // if we go for 3 dimensions coord space or below - just use shared memory for that
    if (size <= MAX_COORD * 4) {
        Nd4jLong *ptr = new Nd4jLong[size / 4]; //manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
        return ptr;
    } else {
        // otherwise go to preallocated global memory :(
        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        if (tid * size > PREALLOC_SIZE - size) {
            return (Nd4jLong *) malloc(size);
        } else {
            Nd4jLong *ret = buffer;
            ret += (tid * size);
            return ret;
        }
    }
}
#endif

#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 */
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
    Nd4jLong *ret = buffer;
    ret += (threadIdx.x * size);
    return ret;
}
#endif

/**
 * Length of a tad given
 * the shape information
 */
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    if(dimensionLength == 1) {
        return shape::shapeOf(shapeInfo)[dimension[0]];
    }
    else {
        int ret = 1;
        for(int i = 0; i < shape::rank(shapeInfo); i++) {
            for(int j = 0; j <
dimensionLength; j++) {
                if(i == dimension[j])
                    ret *= shape::shapeOf(shapeInfo)[dimension[j]];
            }
        }
        return ret;
    }
}

/**
 * Tad element wise stride:
 * given the inner most dimension (the sorted dimension of the last)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it will be 12.
 *
 * For 2,3 it's 1.
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve ordering of the tad,
 * but may be used for reductions.
 */
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    return reductionIndexElementWiseStride(shapeInfo, dimension, dimensionLength);
}

INLINEDEF _CUDA_HD bool shapeEquals(int shape1Rank, Nd4jLong *shape1, int shape2Rank, Nd4jLong *shape2) {
    if(shape1Rank != shape2Rank)
        return false;
    //rank not equals
    for(int i = 0; i < shape1Rank; i++) {
        if(shape1[i] != shape2[i])
            return false;
    }
    return true;
}

INLINEDEF _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1, Nd4jLong *shapeInfo2) {
    return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(shapeInfo1), shape::rank(shapeInfo2), shape::shapeOf(shapeInfo2));
}

INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank, Nd4jLong *shape1, int shape2Rank, Nd4jLong *shape2) {
    if(shape1Rank != shape2Rank)
        return false;
    //rank not equals
    for(int i = 0; i < shape1Rank; i++) {
        if(shape1[i] != shape2[i])
            return false;
    }
    return true;
}

INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1, Nd4jLong *shapeInfo2) {
    return shape::strideEquals(shape::rank(shapeInfo1), shape::stride(shapeInfo1), shape::rank(shapeInfo2), shape::stride(shapeInfo2));
}

INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1, int rank1, Nd4jLong *stride2, int rank2) {
    if(rank1 != rank2)
        return false;

    for(int i = 0; i < rank1; i++) {
        if(stride1[i] != stride2[i])
            return false;
    }
    return true;
}

INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension, int dimensionLength) {
    Nd4jLong *retShape;
    int retShapeLength;
    if(dimensionLength == 1 && dimension[0] == 2147483647) {
        retShape = new Nd4jLong[2];
        retShape[0] = 1;
        retShape[1] = 1;
        retShapeLength = 2;
    }
    else {
        retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
        retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
    }
    //ensure vector is proper shape
    if (retShapeLength == 1) {
        if (dimension[0] == 0) {
            auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
        else {
            auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
    }
    else if (retShapeLength == 0) {
        auto newRetShape = new Nd4jLong[2]{1, 1};
        delete[] retShape;
        retShape = newRetShape;
        retShapeLength = 2;
    }

    auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape);
    delete[] retShape;

    return ret;
}

INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength, bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride =
shape::stride(shapeInfo); int rank = dimensionLength == 1 ? 2 : dimensionLength; Nd4jLong *ret = buffer; //set the rank ret[0] = rank; Nd4jLong *retShape = shape::shapeOf(ret); Nd4jLong *retStride = shape::stride(ret); int len = rank; if(dimensionLength == 1) { if(shape::isMatrix(theShape,shape::rank(shapeInfo))) { if(dimension[0] == 0) { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } else { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong newStride[2] = {1,theStride[dimension[0]]}; Nd4jLong newShape[2] = {1,theShape[dimension[0]]}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong *newIndexes = dimension; if(reverseCopyStride) shape::reverseCopyTo(theStride, retStride, newIndexes, len); else shape::copyTo(len, theStride, retStride, newIndexes); shape::copyTo(len, theShape, retShape, newIndexes); } ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo); return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) { int rank = dimensionLength == 1 ? 2 : dimensionLength; traceNew(4); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) { traceNew(5); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return createShapeInfo(shape, stride, rank, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) { buffer[0] = rank; Nd4jLong *retShape = shape::shapeOf(buffer); Nd4jLong *retStride = shape::stride(buffer); for(int i = 0;i < rank; i++) { retShape[i] = shape[i]; retStride[i] = stride[i]; } return buffer; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); Nd4jLong *ret = new Nd4jLong[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); Nd4jLong *stride = new Nd4jLong[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) { if (isVector(shape, rank)) { for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; int st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. 
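 *
 * C order counts from the right: {3, 4, 5} -> {20, 5, 1}. The Fortran
 * variants above count from the left, giving {1, 3, 12} for the same shape
 * (illustrative values).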
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) { traceNew(7); Nd4jLong *stride = new Nd4jLong[rank]; if (rank == 1) { stride[0] = 1; return stride; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // stride[i] = 1; // return stride; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) { if (rank == 1) { ret[0] = 1; return ret; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // ret[i] = 1; // return ret; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) { return calcStridesFortran(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStridesFortran(shape, rank, 1, ret); } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) { return calcStrides(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStrides(shape, rank, 1, ret); } INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shape, const char order) { int rank = shape[0]; int doubleRank = 2*rank; if (rank > 0) { if (order == 'c') { shape[doubleRank] = 1; // set unity as last stride for c order for (int j = 1; j < rank; ++j) { shape[doubleRank - j] = shape[doubleRank - j + 1] * shape[rank + 1 - j]; } } else { shape[rank + 1] = 1; // set unity as first stride for f order for (int j = rank + 1; j < doubleRank; ++j) { shape[j + 1] = shape[j] * shape[j - rank]; } } } // set last 2 elements in shape shape[doubleRank + 2] = 1; shape[doubleRank + 3] = (int)order; } // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) { for(int i=0; i<dimSize-1; ++i) if(dimensions[i] > dimensions[i+1]) return true; return false; } /** * @param toCopy the shape to copy * @return a copy of the original struct */ INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) { auto copy = new ShapeInformation; traceNew(8); copy->shape = new Nd4jLong[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong)); traceNew(9); copy->stride = new Nd4jLong[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) { if (rank == 0) return 1; if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; Nd4jLong 
*olddims = shape::copyOf(rank, shape); Nd4jLong *oldstrides = shape::copyOf(rank, stride); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; traceNew(10); auto newStrides = new Nd4jLong[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; auto newShape = new Nd4jLong[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < rank; oi++) { if (shape[oi] != 1) { olddims[oldnd] = shape[oi]; oldstrides[oldnd] = stride[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newShapeRank; ni++) { np *= newShape[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newShapeRank && oi < oldnd) { np = newShape[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return -1; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } ni = nj++; oi = oj++; } /* * Set strides corresponding to trailing 1s of the new shape. */ if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (ni >= 1) last_stride *= newShape[ni - 1]; } for (nk = ni; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return ret; } } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return -1; } /** * Get the shape info buffer * for the given rank and shape. 
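 *
 * A sketch of the resulting layout for a 2x3 c-order array (the extra/flags
 * word is filled in via nd4j::ArrayOptions and is not spelled out here;
 * FLOAT32 is assumed to be one of the nd4j::DataType enumerators):
 *
 *     Nd4jLong shape[] = {2, 3};
 *     auto info = shape::shapeBuffer(2, nd4j::DataType::FLOAT32, shape);
 *     // info == {2,  2, 3,  3, 1,  <extra>,  1, 'c'}
 *     //         rank shape  stride           ews order
 *     delete[] info;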
*/ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) { Nd4jLong *stride = shape::calcStrides(shape, rank); traceNew(11); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. * * This method is used only for SoftMax */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) { Nd4jLong stride[MAX_RANK]; shape::calcStrides(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'c'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, buffer); nd4j::ArrayOptions::setDataType(buffer, dtype); return buffer; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) { auto stride = shape::calcStridesFortran(shape,rank); traceNew(12); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) { Nd4jLong stride[MAX_RANK]; shape::calcStridesFortran(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'f'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, output); nd4j::ArrayOptions::setDataType(output, dtype); return output; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) { Nd4jLong length = shape::prodLong(shape,rank); traceNew(13); Nd4jLong *ret = new Nd4jLong[length]; for(int i = 0; i < length; i++) { Nd4jLong *idx = shape::ind2sub(rank, shape, i); ret[i] = shape::getOffset(0, shape, stride, idx, rank); delete[] idx; } return ret; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) { return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer)); } /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ INLINEDEF _CUDA_HD int sub2Ind(int rank, Nd4jLong 
*shape, Nd4jLong *indices) { int index = 0; int shift = 1; for(int i = 0; i < rank; i++) { index += shift * indices[i]; shift *= shape[i]; } return index; } template <typename T> INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) { #pragma omp simd for (int e = 0; e < length; e++) buffer[e] = value; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) { auto ret = new Nd4jLong[rank]; ind2sub(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) { return ind2sub(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) { int denom = numIndices; for(int i = rank - 1; i >= 0; i--) { denom /= shape[i]; ret[i] = index / denom; index %= denom; } } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) { ind2sub(rank,shape, index, shape::prodLong(shape,rank),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong * ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) { auto ret = new Nd4jLong[rank]; ind2subC(rank, shape, index, numIndices, ret); return ret; } /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD Nd4jLong *ind2subC(int rank, Nd4jLong *shape, Nd4jLong index) { return ind2subC(rank,shape, index, shape::prodLong(shape,rank)); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) { auto denom = numIndices; for(int i = 0; i < rank; i++) { denom /= shape[i]; if(denom > 0) { ret[i] = index / denom; index %= denom; } else ret[i] = 0; } } /** * Convert a linear index to * the equivalent nd index. 
* Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) { ind2subC(rank,shape, index,shape::prodLong(shape,rank),out); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) { Nd4jLong offset = 0; for(int i = 1; i <= *shapeInfo; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } return offset; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) { Nd4jLong offset = 0; if(order == 'c') { for(int i = 1; i <= *shapeInfo; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } else { for(int i = *shapeInfo; i >= 1 ; --i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } return offset; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) { if(shape::order(shapeInfo) == 'f') { shape::ind2sub( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } else { shape::ind2subC( shape::rank(shapeInfo), shape::shapeOf(shapeInfo), index, numIndices, out); } } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) { ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out); } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) { traceNew(16); Nd4jLong *ret = new Nd4jLong[length]; for (int i = 0; i < length; i++) { ret[i] = shape[rearrange[i]]; } return ret; } /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) { if(length == 1) { return; } else { Nd4jLong *shapeDeref = *shape; if(shape::prodLong(shapeDeref,length) < 2) { return; } } bool inOrder = true; for(int i = 0; i < length - 1; i++) { inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1]; } //all in order, nothing to do if(inOrder) return; Nd4jLong *shapeDeref = *shape; //we know they are just reversed, dimension length of 2 if(length == 2) { auto shapeFirst = shapeDeref[0]; auto shapeSecond = shapeDeref[1]; 
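// swap the two entries in place (for a valid permutation that is not
// the identity, rearrange here can only be {1, 0})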
shapeDeref[0] = shapeSecond; shapeDeref[1] = shapeFirst; return; } else if(length == 1) { //no permute return; } auto temp = new Nd4jLong[length]; memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length); for (int i = 0; i < length; i++) { shapeDeref[i] = temp[rearrange[i]]; } delete[] temp; } INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) { if(shapeBuffer != out) memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer))); doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out); } INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) { auto len = shape::shapeInfoLength(shape::rank(shapeBuffer)); Nd4jLong *copy = shape::copyOf(len, shapeBuffer); doPermuteShapeBuffer(copy,rearrange); return copy; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[shapeInfoLength(rank) - 2] = -1; shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) { const int rank = shape::rank(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(prodLong(shape::shapeOf(shapeInfo), rank) < 2) return; // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank)]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank)); for (int i = 0; i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shapeInfo[shapeInfoLength(rank) - 2] = -1; shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); delete[] temp; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) { //no swapping needs to happen if(shape::isScalar(shapeBuffer)) { return; } Nd4jLong *shapeRef = shapeBuffer; //rank of 
the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); // doPermuteShapeInfo(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large } /* INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { auto shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = shape::rank(shapeRef); auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer); shape::copyOf(rearrageRank,rearrange, tmpBuffer); shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer); shapeRef[shapeInfoLength(rearrageRank) - 2] = -1; shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1); } */ INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; Nd4jLong *shape = shape::shapeOf(shapeRef); Nd4jLong *stride = shape::stride(shapeRef); auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange); shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1); delete[] rearrangeCopy1; auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange); shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2); shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1; delete[] rearrangeCopy2; } INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) { Nd4jLong *shapeRef = shapeBuffer; //rank of the rearrange array == rank of shape buffer int rearrageRank = rank; auto shape = shape::shapeOf(shapeRef); auto stride = shape::stride(shapeRef); if(shapeBuffer != tmpBuffer) shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer); shape::doPermuteSwap(rearrageRank,&shape,rearrange); shape::doPermuteSwap(rearrageRank,&stride,rearrange); shapeRef[shapeInfoLength(rank) - 2] = -1; shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1); } INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) { int delta = originalRank - dimensionLength; traceNew(17); Nd4jLong *ret = new Nd4jLong[originalRank]; for(int i = 0; i < delta; i++) { ret[i] = i + dimensionLength; } for(int i = delta; i < originalRank; i++) { ret[i] = i - delta; } return ret; } /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } 
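// note: unlike the c-contiguity scan above there is no break here;
// the loop keeps going and only a zero-size dimension stops it early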
if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } INLINEDEF _CUDA_HD void traceNew(int id) { //printf("new happened: [%i]\n", id); #ifndef __CUDACC__ //fflush(stdout); #endif } /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) { if (rank == 0) return 0; if (rank == 1) return 1; if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 1; } return 0; } INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) { int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1 && shapeInfo[0] > 2; } INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) { return isVector(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::rank(shapeInfo)); } INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1; return isVector && shapeFirstOne; } INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && !shapeFirstOne; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ 
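// e.g. shape {3, 4} -> 1, while {1, 4} -> 0 (that is a vector) and any
// rank > 2 shape -> 0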
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
    if (rank > 2)
        return 0;
    else if (rank <= 2) {
        if (shape[0] == 1 || shape[1] == 1)
            return 0;
    }
    return 1;
}

INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
    return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}

/**
 * Returns the shape portion of an information
 * buffer
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
    return buffer + 1;
}

/**
 * Return a copy of a buffer.
 * This buffer allocates memory
 * that must be freed elsewhere.
 */
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
    traceNew(18);
    T *ret = new T[length];
    return copyOf(length, toCopy, ret);
}

template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
    memcpy(ret, toCopy, sizeof(T)*length);
    return ret;
}

/**
 * Copy a buffer into a caller-provided buffer;
 * no memory is allocated here.
 */
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
    memcpy(to, from, sizeof(T)*length);
}

/**
 * Copy a buffer into a caller-provided buffer,
 * gathering elements through the given indexes;
 * no memory is allocated here.
 */
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for(int i = 0; i < length; i++) {
        to[i] = from[indexes[i]];
    }
}

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array
 */
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
    Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
    checkArrangeArray(rearrange, shapeRank, shapeRank);
    Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
    delete[] strideCopy;
    return newStride;
}
*/

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
    return shape + 1;
}

INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
    return static_cast<int>(shape::shapeOf(shapeBuffer)[0]);
}

INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    int newRank = rank - 1;
    if(newRank < 2)
        newRank = 2;
    Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
    newShapeBuffer[0] = newRank;
    Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
    Nd4jLong *currStride = shape::stride(shapeBuffer);
    //initialize new shape and stride by taking the shape and stride + 1
    //and adding to the shape information
    //a slice is always just taking the existing shape and cutting the first index off
    //of the shape and stride
    Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
    Nd4jLong *newStride = shape::stride(newShapeBuffer);
    if(shape::isVector(shapeBuffer)) {
        Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
        //row vector: slice index 0 is a valid index, just copy the whole thing
        if(currShape[0] == 1) {
            if(sliceIdx == 0) {
                memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
                return newShapeBuffer;
            }
        }
        //column vector: this will be a scalar
        else {
            delete[] newShapeBuffer;
            Nd4jLong *scalar = shape::createScalarShapeInfo();
            int offset = shape::offset(shapeBuffer);
            scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
            return scalar;
        }
    }
    else
    if(shape::isMatrix(shapeBuffer)) {
        newShape[0] = 1;
        newShape[1] = currShape[1];
        newStride[0] = 1;
        newStride[1] = currStride[1];
    }
    else {
        for(int i = 0; i < newRank; i++) {
            newShape[i] = currShape[i + 1];
            newStride[i] = currStride[i + 1];
        }
    }
    auto indices = new Nd4jLong[rank];
    memset((void *) indices,0,rank * sizeof(Nd4jLong));
    indices[0] = sliceIdx;
    Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
    newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
    if(shape::isMatrix(shapeBuffer)) {
        newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
    }
    else {
        newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
    }
    newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
    delete[] indices;
    return newShapeBuffer;
}

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
    //FIXME magic numbers
    return rank * 2 + 4;
}

INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(shape[0]));
}

INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(shape[0]));
}

INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
    //FIXME magic numbers
    return (rank * 2 + 4) * sizeof(Nd4jLong);
}

INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) {
    //FIXME magic numbers
    return shapeInfoByteLength((int) shapeInfo[0]);
}

/**
 * Returns the rank portion of
 * an information buffer
 */
INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) {
    return static_cast<int>(buffer[0]);
}

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 */
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);
    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];
    //start after rank
    info->shape = buffer + 1;
    info->stride = buffer + (1 + rank);
    info->rank = rank;
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}

/**
 * Returns the stride portion of an information
 * buffer
 */
INLINEDEF _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer) {
    return buffer + (1 + rank(buffer));
}

INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) {
    return ((shape::extra(const_cast<Nd4jLong*>(shapeInfo)) & ARRAY_EMPTY) == ARRAY_EMPTY);
}

/**
 * Compute the length of the given shape
 */
INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) {
    int rank = shape::rank(shapeInfo);
    if (rank == 0) {
        if (isEmpty(shapeInfo))
            return 0L;
        else
            return 1L;
    }
    if (rank == 1)
        return shapeInfo[1];
    return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), rank);
}

INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong ret = 1;
    for (auto v : shape) {
        ret *= v;
    }
    return ret;
}

INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong ret = 1;
    for (auto v : shape) {
        ret *= v;
    }
    return ret;
}

/***
 * Returns the offset
 * portion of an information buffer
 */
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
    return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}

INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong
*buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } /** * Returns the ordering * for this shape information buffer */ INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) { //FIXME magic numbers return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 1]); } /** * Returns the element wise stride for this information * buffer */ INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) { return buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 2]; } /** * Returns the element wise stride for this information * buffer relative to a dimension and reduction index */ INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) { if(dimensionLength > 1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/
            auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
            return tadElementWiseStride;
        }
    }
}

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 */
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    const int rank = shape::rank(info);
    if(rank > 2)
        return 0;
    if(rank == 0)
        return 1;
    if(rank == 1)
        return shape::shapeOf(info)[0] == 1;
    if(rank == 2)
        return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1;
    return 0;
}

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
    const int rank = info->rank;
    if(rank > 2)
        return 0;
    if(rank == 0)
        return 1; // keep consistent with the shape-info-buffer overload above
    if(rank == 1)
        return info->shape[0] == 1;
    if(rank == 2)
        return info->shape[0] == 1 && info->shape[1] == 1;
    return 0;
}

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted item
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    int count = 0;
    int absLength = dataLength - indexesLength;
    for (int i = 0; i < dataLength && count < absLength; i++) {
        int contains = 0;
        for (int j = 0; j < indexesLength; j++) {
            if (i == indexes[j]) {
                contains = 1;
                break;
            }
        }
        if (!contains) {
            ret[count] = data[i];
            count++;
        }
    }
}

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the omitted item
 */
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr <= 0) {
        printf("Remove index call created a <= 0 length array. This was likely not intended.\n");
    }
    auto ret = new T1[lengthOfArr];
    memset(ret,0,sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}

INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
    int len = end - indexesLength;
    traceNew(20);
    auto ret = new Nd4jLong[len];
    int retIdx = 0;
    //note here that we do 0 based indexing for end - this assumes things like:
    //0 to 4 are specified
    for(int i = begin; i < end ; i++) {
        bool found = false;
        for(int j = 0; j < indexesLength; j++) {
            if(indexes[j] == i) {
                found = true;
                break;
            }
        }
        if(!found) {
            ret[retIdx++] = i;
        }
    }
    return ret;
}

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
#ifdef __CUDACC__
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
    return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif

/**
 * Returns a shape buffer that
 * forces the given length to be 2.
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) { traceNew(21); Nd4jLong *ret = new Nd4jLong[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) { if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews auto length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { return equalsSoft(shapeA, shapeB) && shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3]; } /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; T *ret; traceNew(22); if(diff / increment < 1) ret = new T[1]; else ret = new T[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end * @return the int array starting at from and ending at to */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to) { return range<T>(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) { traceNew(23); Nd4jLong *ret = new Nd4jLong[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ template <typename T> INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) { if (length < 1) return nullptr; traceNew(24); T *copy = new T[length]; for (Nd4jLong 
i = 0; i <= length / 2; i++) {
        T temp = data[i];
        copy[i] = data[length - i - 1];
        copy[length - i - 1] = temp;
    }
    return copy;
}

template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        T temp = from[i];
        to[i] = from[length - i - 1];
        to[length - i - 1] = temp;
    }
}

template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        T temp = from[indexes[i]];
        to[i] = from[indexes[length - i - 1]];
        to[length - i - 1] = temp;
    }
}

/**
 *
 * @param arr1
 * @param arr1Length
 * @param arr2
 * @param arr2Length
 * @return
 */
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
    traceNew(25);
    T *ret = new T[arr1Length + arr2Length];
    std::memcpy(ret, arr1, arr1Length * sizeof(T));
    std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T));
    return ret;
}

/**
 *
 * @param numArrays
 * @param numTotalElements
 * @param arr
 * @param lengths
 * @return
 */
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    T* ret = new T[numTotalElements];
    Nd4jLong count = 0;
    for (Nd4jLong i = 0; i < numArrays; i++) {
        for (Nd4jLong j = 0; j < lengths[i]; j++) {
            ret[count++] = arr[i][j];
        }
    }
    return ret;
}

/**
 * Get the length per slice of the
 * given shape and the dimension
 * @param rank the rank of the shape
 * @param shape the shape of to get
 * the length per slice for
 * @param dimension the dimension to
 * get the length per slice for
 * @param dimensionLength the length of the dimension array
 * @return the length per slice of the given shape
 * along the given dimension
 */
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
    if(shape::isVector(shape,rank)) {
        //return total length for row vectors
        if(dimensionLength == 1 && shape[0] == 1) {
            return shape::prod(shape,rank);
        }
    }
    else if(rank == dimensionLength)
        return shape::prod(shape,rank);
    int absDelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
    traceNew(27);
    auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
    auto ret = prodLong(ret2, absDelta);
    delete[] ret2;
    return ret;
}

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
    auto tensorLength = prodLong(tensorShape, tensorShapeLength);
    auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength);
    if (lengthPerSlice2 <= 0) {
        return 0;
    }
    Nd4jLong offset = index * tensorLength / lengthPerSlice2;
    return offset;
}

/**
 * calculates the offset for a tensor
 * @param index
 * @param arr
 * @param tensorShape
 * @return
 */
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
    Nd4jLong offset = index * tensorLength / lengthPerSlice2;
    return offset;
}

#ifdef __CUDACC__
/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
*/ INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength) { Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { Nd4jLong *keepShape = shape::shapeOf(shapeInfo); Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, const Nd4jLong *indices, int rank) { Nd4jLong offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { #ifdef __CUDA_ARCH__ printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]); #else printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]); #endif #ifdef __CUDA_ARCH__ if (threadIdx.x == 0 && blockIdx.x == 0) printShapeInfoLinear("getOffsetFailed", rank, shape, stride); #endif return -1; } if(shape[i] != 1) { offset += indices[i] * stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) { return nd4j::math::nd4j_ceil<double, int>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. 
*/ INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) { traceNew(29); auto ret = new Nd4jLong[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count] = info->order; return ret; } INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } INLINEDEF _CUDA_HD void printIntArray(Nd4jLong *arr,int length) { for(int i = 0; i < length; i++) { printf(" %lld ", (long long) arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); Nd4jLong *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ",(long long) shape[i]); } printf("\n"); Nd4jLong *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ", (long long) stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) { printf("%s : [", msg); for (int i = 0; i < rank; i++) { printf("%lld, ", (long long) shape[i]); } for (int i = 0; i < rank; i++) { printf("%lld", (long long) strides[i]); if (i < rank - 1) printf(", "); } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("%s : [", msg); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } template <typename T> INLINEDEF _CUDA_HD void printArray(void *varr,int length, const char * message) { auto arr = reinterpret_cast<T*>(varr); if (message != nullptr) printf("%s: [", message); else printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", (float) arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * 
elementWiseStride); }

/**
 * Map a tad to a
 * reduction index.
 * @param tadIndexForOriginal the original tad index for the
 * split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
 * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
 */
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) {
    if (tadIndexForOriginal == 0)
        return 0;
    return tadIndexForOriginal / (tadsForOriginal / tadsForReduced);
}

INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);
    // swap shape (temporaries are Nd4jLong to avoid narrowing 64-bit extents to int)
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = shape[idx2];
        shape[idx2] = shape[idx1];
        shape[idx1] = tmp;
    }
    // swap strides
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = strides[idx2];
        strides[idx2] = strides[idx1];
        strides[idx1] = tmp;
    }
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}

/**
 * Tad index for linear
 * @param linearIndex
 * @param tadLength
 * @return
 */
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
    return linearIndex % tadLength;
}

/**
 * Computes the number of tads
 * per reduce index for the
 * reduction tad.
 */
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
    return tadsForOriginal / tadsForReduce;
}

/**
 * Maps a linear index to a reduction index
 * @param i the linear index to map
 * @param elementWiseStride the element wise stride
 * for the multiple problem
 * @param tadNum the number of tads for the shrunken problem
 * @param originalTadNum the tad number for the reduced version of the problem
 */
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) {
    int tad = tadIndex(i, elementWiseStride, numElementsPerTad);
    return reductionIndexForTad(tad, tadNum, originalTadNum);
}

INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
    traceNew(30);
    auto shape = new Nd4jLong[1];
    shape[0] = 1;
    auto stride = new Nd4jLong[1];
    stride[0] = 1;
    auto shapeInformation2 = new ShapeInformation();
    shapeInformation2->rank = 1;
    shapeInformation2->offset = 0;
    shapeInformation2->stride = stride;
    shapeInformation2->shape = shape;
    shapeInformation2->elementWiseStride = 1;
    shapeInformation2->order = 99;
    Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2);
    delete shapeInformation2;
    delete[] shape;
    delete[] stride;
    return ret;
}

INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
    ret[0] = 2;
    ret[1] = 1;
    ret[2] = 1;
    ret[3] = 1;
    ret[4] = 1;
    ret[5] = 0;
    ret[6] = 1;
    ret[7] = 99;
    return ret;
}

/**
 * Returns the prod of the data
 * up to the given length
 */
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    int prod = 1;
    for (int i = 0; i < length; i++) {
        prod *= data[i];
    }
    return prod;
}

/**
 * Returns the prod of the data
 * up to the given length
 */
INLINEDEF _CUDA_HD Nd4jLong prodLong(Nd4jLong *data, int length) {
    Nd4jLong prod = 1;
    for (int i = 0; i < length; i++) {
        prod *= data[i];
    }
    return prod;
}

INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    //corner case: return the final item when it's
greater than the max, since it's guaranteed to be left over
    //note here that strides are interpreted in reverse for tad
    //start from the front rather than the back
    int rank = shape::rank(data);
    int dimIdx = dimensionLength - 1;
    for(int i = rank - 1; i >= 0; i--) {
        /**
         * Needs to find an algorithm such that:
         * looping backwards will find the highest dimension left
         * that isn't included in the dimension index list.
         *
         * This can also be thought of as the last item of the first index
         * of the difference between the full list of indices and
         * the dimension indices.
         *
         * We should avoid excessive object creation by only looping backwards.
         */
        if(dimension[dimIdx--] != i) {
            int ret = stride[i];
            return ret;
        }
    }
    int ret = stride[0];
    return ret;
}

#ifdef __CUDACC__
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
    // we read first element, to find out length of our shapeInfoBuffer
    int rank = shapeInfoBuffer[0];
    int len = shape::shapeInfoLength(rank);
    for (int i = threadIdx.x; i < len; i += blockDim.x)
        targetBuffer[i] = shapeInfoBuffer[i];
}
#endif

INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
    return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder);
}

// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
//     unsigned Nd4jLong *shape;
//     unsigned int ndims, wordSize;
//     bool fortranOrder;
//     cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
//     Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
//     delete[] shape;
//     return ret;
// }

INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) {
    if(fortranOrder) {
        Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank, nd4j::FLOAT32,(Nd4jLong *) shape);
        return shapeBufferRet;
    }
    else {
        Nd4jLong *newShape = new Nd4jLong[rank];
        for(int i = 0; i < rank; i++) {
            newShape[i] = shape[i];
        }
        Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank, nd4j::FLOAT32, newShape);
        delete[] newShape;
        return shapeBufferRet;
    }
}

INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer));
    char order = shape::order(shapeBuffer);
    if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
        return true;
    if (order == 'c') {
        for (int i = 1; i < rank; i++)
            if (strides[i-1] <= strides[i])
                return false;
        return true;
    } else if (order == 'f') {
        for (int i = 1; i < rank; i++)
            if (strides[i-1] >= strides[i])
                return false;
        return true;
    } else {
        printf("Unknown order for array!\n");
        return false;
    }
}

INLINEDEF _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo) {
    return (order(shapeInfo) == 'c') && (elementWiseStride(shapeInfo) > 0);
}

//////////////////////////////////////////////////////////////////////////
// copy-paste from the java hasDefaultStridesForShape function
INLINEDEF _CUDA_HD bool
areStridesDefault(const Nd4jLong* shapeInfo) { const int rank = shape::rank(shapeInfo); if(rank == 0) return true; if(!strideDescendingCAscendingF(shapeInfo)) return false; Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH]; memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo)); shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo)); bool result = true; for(int i = rank+1; i <= 2*rank; ++i) if(defaultShapeInfo[i] != shapeInfo[i]) { result = false; break; } return result; } INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) { int oldnd; Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; Nd4jLong* newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = shape::elementWiseStride(oldShape); } if (isFOrder && ni >= 1) { last_stride *= newShapeOf[ni - 1]; } for (nk = ni; nk < newRank; nk++) { newStrides[nk] = last_stride; } target[0] = newRank; int cnt = 1; for (int e = 0; e < newRank; e++) target[cnt++] = newShapeOf[e]; for (int e = 0; e < newRank; e++) target[cnt++] = newStrides[e]; target[shape::shapeInfoLength(newRank) - 3] = 0; target[shape::shapeInfoLength(newRank) - 2] = -1; target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 
102 : 99; nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape)); delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) { int oldnd; Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; auto newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oi = 0; oi < oldRank; oi++) { if (shape::shapeOf(oldShape)[oi] != 1) { olddims[oldnd] = shape::shapeOf(oldShape)[oi]; oldstrides[oldnd] = shape::stride(oldShape)[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newRank; ni++) { np *= newShapeOf[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newRank && oi < oldnd) { np = newShapeOf[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ delete[] olddims; delete[] oldstrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } ni = nj++; oi = oj++; } delete[] olddims; delete[] oldstrides; delete[] newStrides; return true; } // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also it sorts input array of dimensions, this operation is also necessary for creating TAD object INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) { int dimSize = dimensions.size(); if(dimSize == 0) throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!"); // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim| for(auto& dim : dimensions) if(dim < 0) dim += rank; // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods if (dimSize > 1) { std::sort(dimensions.begin(), dimensions.end()); // remove duplicates if they are present dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end()); } // check whether number of dimensions is to big (>rank) 
dimSize = dimensions.size(); if(dimSize > rank) throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!"); // check if min dimension is still negative and whether max dimension is bigger then rank-1 if(dimensions[0] < 0 || dimensions.back() > (rank-1)) throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !"); } // return absolute index of array min, min is sub-array of max, index to be returned is min's index and corresponds to maxIdx of max array INLINEDEF _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx) { const auto rankMax = shape::rank(maxShapeInfo); const auto rankMin = shape::rank(minShapeInfo); Nd4jLong idxPerRank[MAX_RANK]; ind2subC(rankMax, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<int&>(maxIdx), idxPerRank); Nd4jLong minIdx = 0; for(int i = 0; i < rankMin; ++i) { if(minShapeInfo[rankMin - i] == 1 || idxPerRank[rankMax - i - 1] == 0) continue; if(idxPerRank[rankMax - i - 1] >= minShapeInfo[rankMin - i]) idxPerRank[rankMax - i - 1] %= minShapeInfo[rankMin - i]; minIdx += idxPerRank[rankMax - i - 1] * stride(const_cast<Nd4jLong*>(minShapeInfo))[rankMin - i - 1]; } return minIdx; } INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) { buffer[0] = 2; buffer[1] = 1; buffer[2] = 1; buffer[3] = 1; buffer[4] = 1; buffer[6] = 1; buffer[7] = (int)order; nd4j::ArrayOptions::setDataType(buffer, dataType); } template <typename T1, typename T2> INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) { for (Nd4jLong e = 0; e < length; e++) to[e] = (T2) from[e]; }; } #endif /* SHAPE_H_ */
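/*
 * Illustrative usage sketch (not part of the original header): the helpers
 * above all assume the flat shapeInfo layout produced by toShapeBuffer(),
 * i.e. [rank, shape[0..r-1], stride[0..r-1], offset, elementWiseStride,
 * order], giving shapeInfoLength(rank) == 2*rank + 4. The snippet below
 * builds such a buffer by hand for a 2x3 c-ordered array and transposes it
 * in place; it assumes shape::doPermuteSwap (defined elsewhere) applies the
 * given permutation to the pointed-to array.
 *
 *   Nd4jLong shapeInfo[2 * 2 + 4] = {2,        // rank
 *                                    2, 3,     // shape
 *                                    3, 1,     // stride ('c' order)
 *                                    0,        // offset
 *                                    1,        // element-wise stride
 *                                    99};      // order: 99 == 'c'
 *   int rearrange[2] = {1, 0};                 // transpose permutation
 *   shape::doPermuteShapeBuffer(2, shapeInfo, rearrange);
 *   // shape/stride become {3,2}/{1,3}; the element-wise-stride slot is
 *   // reset to -1 and the order slot is recomputed by shape::getOrder,
 *   // which yields 102 ('f') for this buffer.
 */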
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *source_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o source_image: the source image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. 
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated list
%        of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp" and
%        "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
   Composition based on the SVG specification:

   A Composition is defined by...
      Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
      Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
                        Y = 1     for source preserved
                        Z = 1     for canvas preserved

   Conversion to transparency (then optimized)
      Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
      Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

   Where...
      Sca = Sc*Sa     normalized Source color multiplied by Source alpha
      Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
      Dc' = Dca'/Da'  the desired color value for this channel.
      Da' is referred to in the formulas below as 'gamma', the resulting
      alpha value.

   Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
   the following optimizations...
      gamma = Sa+Da-Sa*Da;
      gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
      opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

   The above SVG definitions also define that Mathematical Composition
   methods should use an 'Over' blending mode for the Alpha Channel.
   It however was not applied for composition modes of 'Plus', 'Minus',
   the modulus versions of 'Add' and 'Subtract'.

   Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification
       with regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates)
       is turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

static void HCLComposite(const MagickRealType hue,const MagickRealType chroma, const MagickRealType luma,MagickRealType *red,MagickRealType *green, MagickRealType *blue)
{
  MagickRealType b, c, g, h, m, r, x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma)
{
  MagickRealType b, c, g, h, max, r;

  /*
    Convert RGB to HCL colorspace.
*/ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. */ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. */ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. 
Da: normalized canvas alpha. */ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; 
source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; if ((source_image->alpha_trait == UndefinedPixelTrait) && (image->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception); status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++) { PixelChannel channel = GetPixelChannelChannel(source_image,i); PixelTrait source_traits = GetPixelChannelTraits(source_image, channel); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((source_traits == UndefinedPixelTrait) || (traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp 
!= MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; double angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=2.0*geometry_info.rho; height=width; if ((flags & HeightValue) != 0) height=2.0*geometry_info.sigma; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; if ((flags & XValue) != 0 ) { MagickRealType angle; /* Rotate vectors if a rotation angle is given. */ angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { /* Lets set a angle range and calculate in the loop. */ angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, restore them afterwards. */ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* Perform the variable blurring of each pixel in image. 
*/ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
*/ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. */ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case FreezeCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case InterpolateCompositeOp: case LightenCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case MultiplyCompositeOp: case NegateCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ReflectCompositeOp: case ScreenCompositeOp: case SoftBurnCompositeOp: case SoftDodgeCompositeOp: case SoftLightCompositeOp: case StampCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } case ModulusAddCompositeOp: { if ((Sa+Da) <= 1.0) { alpha=(Sa+Da); break; } alpha=((Sa+Da)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sa-Da) >= 0.0) { alpha=(Sa-Da); break; } alpha=((Sa-Da)+1.0); break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits = GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((channel == AlphaPixelChannel) && ((traits & UpdatePixelTrait) != 0)) { /* Set alpha channel. 
*/ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case BlurCompositeOp: case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case DifferenceCompositeOp: { pixel=QuantumRange*fabs(Sa-Da); break; } case FreezeCompositeOp: { pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)* PerceptibleReciprocal(Da)); if (pixel < 0.0) pixel=0.0; break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25* cos(MagickPI*Da)); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } case NegateCompositeOp: { pixel=QuantumRange*((1.0-Sa-Da)); break; } case ReflectCompositeOp: { pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case StampCompositeOp: { pixel=QuantumRange*(Sa+Da*Da-1.0); break; } case StereoCompositeOp: { pixel=QuantumRange*(Sa+Da)/2; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) GetPixelBlack(source_image,p); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*gamma*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case FreezeCompositeOp: { pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)* PerceptibleReciprocal(Dca)); if (pixel < 0.0) pixel=0.0; break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25* cos(MagickPI*Dca)); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { if ((Sca+Dca) <= 1.0) { pixel=QuantumRange*(Sca+Dca); break; } pixel=QuantumRange*((Sca+Dca)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sca-Dca) >= 0.0) { pixel=QuantumRange*(Sca-Dca); break; } pixel=QuantumRange*((Sca-Dca)+1.0); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case NegateCompositeOp: { pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*Sca; break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 
2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case ReflectCompositeOp: { pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftBurnCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)* PerceptibleReciprocal(Dca)); break; } case SoftDodgeCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)* PerceptibleReciprocal(Sca)); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StampCompositeOp: { pixel=QuantumRange*(Sca+Dca*Dca-1.0); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? 
ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o texture_image: This image is the texture to layer on the background. % */ MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture, ExceptionInfo *exception) { #define TextureImageTag "Texture/Image" CacheView *image_view, *texture_view; Image *texture_image; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (texture == (const Image *) NULL) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); texture_image=CloneImage(texture,0,0,MagickTrue,exception); if (texture_image == (const Image *) NULL) return(MagickFalse); (void) TransformImageColorspace(texture_image,image->colorspace,exception); (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod, exception); status=MagickTrue; if ((image->compose != CopyCompositeOp) && ((image->compose != OverCompositeOp) || (image->alpha_trait != UndefinedPixelTrait) || (texture_image->alpha_trait != UndefinedPixelTrait))) { /* Tile texture onto the image background. */ for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { MagickBooleanType thread_status; thread_status=CompositeImage(image,texture_image,image->compose, MagickTrue,x+texture_image->tile_offset.x,y+ texture_image->tile_offset.y,exception); if (thread_status == MagickFalse) { status=thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType) image->rows,image->rows); texture_image=DestroyImage(texture_image); return(status); } /* Tile texture onto the image background (optimized). 
*/ status=MagickTrue; texture_view=AcquireVirtualCacheView(texture_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(texture_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum *p, *pixels; register ssize_t x; register Quantum *q; size_t width; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x, (y+texture_image->tile_offset.y) % texture_image->rows, texture_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { register ssize_t j; p=pixels; width=texture_image->columns; if ((x+(ssize_t) width) > (ssize_t) image->columns) width=image->columns-x; for (j=0; j < (ssize_t) width; j++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++) { PixelChannel channel = GetPixelChannelChannel(texture_image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait texture_traits=GetPixelChannelTraits(texture_image, channel); if ((traits == UndefinedPixelTrait) || (texture_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(texture_image); q+=GetPixelChannels(image); } } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } texture_view=DestroyCacheView(texture_view); image_view=DestroyCacheView(image_view); texture_image=DestroyImage(texture_image); return(status); }
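/*
  Illustrative appendix (not part of composite.c above): the Porter-Duff
  algebra used throughout the channel loop -- Sca/Dca premultiplied colors,
  a composite alpha, and a gamma un-premultiply step -- reduced to a single
  self-contained "over" operation on one normalized pixel.  The helper name
  composite_over is hypothetical; values are doubles in [0,1] rather than
  Quantum.
*/
#include <stdio.h>

static void composite_over(double Sc,double Sa,double Dc,double Da,
  double *out_c,double *out_a)
{
  double Sca = Sa*Sc;                    /* premultiplied source color */
  double Dca = Da*Dc;                    /* premultiplied canvas color */
  double alpha = Sa+Da-Sa*Da;            /* alpha of OverCompositeOp   */
  double Dca_out = Sca+Dca*(1.0-Sa);     /* Dca' = Sca+Dca*(1-Sa)      */

  *out_a=alpha;
  /* gamma = 1/alpha, guarded like PerceptibleReciprocal(). */
  *out_c=(alpha > 1.0e-12) ? Dca_out/alpha : 0.0;
}

int main(void)
{
  double a, c;

  /* 50% opaque white over opaque black: expect 0.5 gray, full alpha. */
  composite_over(1.0,0.5,0.0,1.0,&c,&a);
  (void) printf("color=%g alpha=%g\n",c,a);
  return(0);
}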
integral_parallel1.c
#include <stdio.h>
#include <omp.h>

#define NUM_THREADS 4

static long num_steps = 100000;
double step;

int main(){
    int i, nthreads;
    double pi, sum[NUM_THREADS], init_time, finish_time;

    step = 1.0 / (double)num_steps;
    init_time = omp_get_wtime();
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel
    {
        int i, id, nthrds;
        double x;

        id = omp_get_thread_num();
        nthrds = omp_get_num_threads();
        /* Only thread 0 records the actual team size, in case the runtime
           delivered fewer threads than requested. */
        if (id == 0) nthreads = nthrds;
        /* Cyclic (round-robin) distribution of the loop iterations. */
        for (i=id, sum[id]=0.0 ; i<num_steps ; i=i+nthrds){
            x = (i+0.5)*step;
            sum[id] += 4.0/(1.0+x*x);
        }
    }
    finish_time = omp_get_wtime()-init_time;
    /* Serial reduction over the per-thread partial sums. */
    for (i=0, pi=0.0 ; i < nthreads; i++)
        pi += sum[i]*step;
    printf("PI = %f\n", pi);
    printf("Time = %f\n", finish_time);
    return 0;
}
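/*
  A minimal alternative sketch (same integral; not part of the original
  file): the per-thread sum[NUM_THREADS] array above keeps partial sums in
  adjacent memory, which can cause false sharing between cores.  OpenMP's
  reduction clause keeps each partial sum in a private variable instead.
*/
#include <stdio.h>
#include <omp.h>

static long n_steps = 100000;

int main(void){
    double step = 1.0 / (double)n_steps;
    double sum = 0.0;
    double t0 = omp_get_wtime();
    #pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < n_steps; i++){
        double x = (i+0.5)*step;
        sum += 4.0/(1.0+x*x);
    }
    printf("PI = %f\n", sum*step);
    printf("Time = %f\n", omp_get_wtime()-t0);
    return 0;
}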
GB_unaryop__minv_uint32_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint32_int64 // op(A') function: GB_tran__minv_uint32_int64 // C type: uint32_t // A type: int64_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint32_int64 ( uint32_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
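//------------------------------------------------------------------------------
// Hand-expanded sketch of the GB_CAST_OP loop above (illustrative only, not
// generated code): cast each int64_t entry to uint32_t, then apply the op.
// The minv_uint32 stub is an assumption standing in for GB_IMINV_UNSIGNED;
// the authoritative definition lives in GB.h.
//------------------------------------------------------------------------------

#include <stdint.h>

static inline uint32_t minv_uint32 (uint32_t x)
{
    // integer 1/x for illustration: 1/1 == 1, 1/x == 0 for x > 1; the
    // x == 0 case is treated here as "infinity" -- an assumption.
    return ((x == 0) ? UINT32_MAX : 1u / x) ;
}

void unop_minv_uint32_int64_sketch
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t x = (uint32_t) Ax [p] ;    // GB_CASTING: typecast
        Cx [p] = minv_uint32 (x) ;          // GB_OP: apply the operator
    }
}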
Updater.h
/*
 * Copyright 2016 [See AUTHORS file for list of authors]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _UPDATER_
#define _UPDATER_

#include "../DatapointPartitions/DatapointPartitions.h"
#include "../Gradient/Gradient.h"

// Some macros to declare extra thread-local / global 1d/2d vectors.
// This avoids the use of std::maps, which are very inefficient.
// Gives around a 2-3x speedup over using maps.
#define REGISTER_THREAD_LOCAL_1D_VECTOR(NAME) std::vector<std::vector<double> > NAME ## _LOCAL_
#define REGISTER_THREAD_LOCAL_2D_VECTOR(NAME) std::vector<std::vector<std::vector<double> > > NAME ## _LOCAL_
#define INITIALIZE_THREAD_LOCAL_1D_VECTOR(NAME, N_COLUMNS) {NAME##_LOCAL_.resize(FLAGS_n_threads); for (int i = 0; i < FLAGS_n_threads; i++) NAME ## _LOCAL_[i].resize(N_COLUMNS, 0);}
#define INITIALIZE_THREAD_LOCAL_2D_VECTOR(NAME, N_ROWS, N_COLUMNS) {NAME##_LOCAL_.resize(FLAGS_n_threads); for (int i = 0; i < FLAGS_n_threads; i++) NAME ## _LOCAL_[i].resize(N_ROWS, std::vector<double>(N_COLUMNS, 0));}
#define GET_THREAD_LOCAL_VECTOR(NAME) NAME ## _LOCAL_[omp_get_thread_num()]
#define REGISTER_GLOBAL_1D_VECTOR(NAME) std::vector<double> NAME ## _GLOBAL_
#define REGISTER_GLOBAL_2D_VECTOR(NAME) std::vector<std::vector<double> > NAME ## _GLOBAL_
#define INITIALIZE_GLOBAL_1D_VECTOR(NAME, N_COLUMNS) {NAME ## _GLOBAL_.resize(N_COLUMNS, 0);}
#define INITIALIZE_GLOBAL_2D_VECTOR(NAME, N_ROWS, N_COLUMNS) {NAME ## _GLOBAL_.resize(N_ROWS, std::vector<double>(N_COLUMNS, 0));}
#define GET_GLOBAL_VECTOR(NAME) NAME ## _GLOBAL_
#define REGISTER_THREAD_LOCAL_DOUBLE(NAME) std::vector<double > NAME ## _LOCAL_
#define INITIALIZE_THREAD_LOCAL_DOUBLE(NAME) {NAME##_LOCAL_.resize(FLAGS_n_threads); std::fill(NAME##_LOCAL_.begin(), NAME##_LOCAL_.end(), 0);}

class Updater {
 protected:
    // Keep a reference of the model and datapoints, and partition ordering.
    Model *model;
    std::vector<Datapoint *> datapoints;
    DatapointPartitions *datapoint_partitions;

    // Have an array of Gradient objects (stores extra info for Model processing).
    // Have 1 per thread to avoid conflicts.
    Gradient *thread_gradients;

    std::vector<int> bookkeeping;

    // A reference to all_coordinates, which indexes all the coordinates of the model.
    std::vector<int> all_coordinates;

    // H, Nu and Mu for updates.
    virtual double H(int coordinate, int index_into_coordinate_vector) = 0;
    virtual double Nu(int coordinate, int index_into_coordinate_vector) = 0;
    virtual double Mu(int coordinate) = 0;

    // After calling PrepareNu/Mu/H, for the given coordinates, we expect that
    // calls to Nu/Mu/H are ready.
    virtual void PrepareNu(std::vector<int> &coordinates) = 0;
    virtual void PrepareMu(std::vector<int> &coordinates) = 0;
    virtual void PrepareH(Datapoint *datapoint, Gradient *g) = 0;

    // By default need catch up.
    virtual bool NeedCatchUp() {
	return true;
    }

    virtual void ApplyGradient(Datapoint *datapoint) {
	std::vector<double> &model_data = model->ModelData();
	int coordinate_size = model->CoordinateSize();
	for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
	    int index = datapoint->GetCoordinates()[i];
	    double mu = Mu(index);
	    for (int j = 0; j < coordinate_size; j++) {
		model_data[index * coordinate_size + j] =
		    (1 - mu) * model_data[index * coordinate_size + j]
		    - Nu(index, j) + H(index, j);
	    }
	}
    }

    virtual void CatchUp(int index, int diff) {
	if (!NeedCatchUp()) return;
	if (diff < 0) diff = 0;
	// Closed form of the geometric series accumulated over the
	// diff skipped updates of this coordinate.
	double geom_sum = 0;
	double mu = Mu(index);
	if (mu != 0) {
	    geom_sum = ((1 - pow(1 - mu, diff+1)) / (1 - (1 - mu))) - 1;
	}
	for (int j = 0; j < model->CoordinateSize(); j++) {
	    model->ModelData()[index * model->CoordinateSize() + j] =
		pow(1 - mu, diff) * model->ModelData()[index * model->CoordinateSize() + j]
		- Nu(index, j) * geom_sum;
	}
    }

    virtual void CatchUpDatapoint(Datapoint *datapoint) {
	for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
	    int index = datapoint->GetCoordinates()[i];
	    int diff = datapoint->GetOrder() - bookkeeping[index] - 1;
	    CatchUp(index, diff);
	}
    }

    virtual void FinalCatchUp() {
	#pragma omp parallel num_threads(FLAGS_n_threads)
	{
	    PrepareNu(all_coordinates);
	    PrepareMu(all_coordinates);
	    #pragma omp for
	    for (int i = 0; i < model->NumParameters(); i++) {
		int diff = model->NumParameters() - bookkeeping[i];
		CatchUp(i, diff);
	    }
	}
    }

 public:
    Updater(Model *model, std::vector<Datapoint *> &datapoints) {
	// Create gradients for each thread.
	thread_gradients = new Gradient[FLAGS_n_threads];
	for (int thread = 0; thread < FLAGS_n_threads; thread++) {
	    thread_gradients[thread] = Gradient();
	}
	this->model = model;

	// Set up bookkeeping.
	this->datapoints = datapoints;
	for (int i = 0; i < model->NumParameters(); i++) {
	    bookkeeping.push_back(0);
	}

	// Keep an array that has the integers 0...NumParameters()-1.
	for (int i = 0; i < model->NumParameters(); i++) {
	    all_coordinates.push_back(i);
	}
    }

    Updater() {
	thread_gradients = nullptr;
    }

    virtual ~Updater() {
	delete [] thread_gradients;
    }

    // Could be useful to get partitioning info.
    virtual void SetUpWithPartitions(DatapointPartitions &partitions) {
	datapoint_partitions = &partitions;
    }

    // Main update method, which is run by multiple threads.
    virtual void Update(Model *model, Datapoint *datapoint) {
	int thread_num = omp_get_thread_num();
	thread_gradients[thread_num].Clear();
	thread_gradients[thread_num].datapoint = datapoint;

	// First prepare Nu and Mu for catchup since they are independent of the model.
	PrepareNu(datapoint->GetCoordinates());
	PrepareMu(datapoint->GetCoordinates());
	CatchUpDatapoint(datapoint);

	// After catching up, prepare H and apply the gradient.
	PrepareH(datapoint, &thread_gradients[thread_num]);
	ApplyGradient(datapoint);

	// Update bookkeeping.
	for (const auto &coordinate : datapoint->GetCoordinates()) {
	    bookkeeping[coordinate] = datapoint->GetOrder();
	}
    }

    // Called before epoch begins.
    virtual void EpochBegin() {
    }

    // Called when the epoch ends.
    virtual void EpochFinish() {
	FinalCatchUp();
	std::fill(bookkeeping.begin(), bookkeeping.end(), 0);
    }
};

#endif
GB_binop__eq_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__eq_int16 // A.*B function (eWiseMult): GB_AemultB__eq_int16 // A*D function (colscale): GB_AxD__eq_int16 // D*A function (rowscale): GB_DxB__eq_int16 // C+=B function (dense accum): GB_Cdense_accumB__eq_int16 // C+=b function (dense accum): GB_Cdense_accumb__eq_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_int16 // C=scalar+B GB_bind1st__eq_int16 // C=scalar+B' GB_bind1st_tran__eq_int16 // C=A+scalar GB_bind2nd__eq_int16 // C=A'+scalar GB_bind2nd_tran__eq_int16 // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT16 || GxB_NO_EQ_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__eq_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__eq_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__eq_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__eq_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__eq_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__eq_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__eq_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__eq_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__eq_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__eq_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__eq_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
degree.h
#pragma once
#ifndef DEGREEORDERCSR_H
#define DEGREEORDERCSR_H

#include "../general.h"

#include <algorithm>
#include <cstdlib>
#include <functional>
#include <numeric>
#include <vector>
#ifdef _OPENMP
#include <parallel/algorithm>
#endif

namespace PpParallel {

// Helper function: strict weak ordering by (degree, node id).
// Note: std::sort requires a *strict* ordering; the tie-break must use
// '<', not '<=', otherwise compare_degree(v, v) returns true and sorting
// invokes undefined behavior.
//
// This code is generic over both CGraph and SGraph.
template <class AnyGraph>
inline bool compare_degree(const AnyGraph *graph, NodeId v, NodeId w) {
    int64_t degV = graph->out_degree(v);
    int64_t degW = graph->out_degree(w);
    return degV < degW || (degV == degW && v < w);
}

// This code is generic over both CGraph and SGraph.
template <class AnyGraph, bool useRankFormat = false, class Output = std::vector<NodeId>>
void getDegreeOrdering(const AnyGraph &graph, Output &res) {
    using namespace std::placeholders;
    auto n = graph.num_nodes();
    res.resize(n);
    if constexpr (useRankFormat) {
        // Produce result in Rank-Format
        std::vector<NodeId> temp(n);
        std::iota(temp.begin(), temp.end(), 0);
#ifdef _OPENMP
        __gnu_parallel::sort(temp.begin(), temp.end(),
                             std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#else
        std::sort(temp.begin(), temp.end(),
                  std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#endif
#pragma omp parallel for schedule(static, 64)
        for (int64_t i = 0; i < (int64_t)n; i++)
            res[temp[i]] = i;
    } else {
        // Produce result in Order-Format
        std::iota(res.begin(), res.end(), 0);
#ifdef _OPENMP
        __gnu_parallel::sort(res.begin(), res.end(),
                             std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#else
        std::sort(res.begin(), res.end(),
                  std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#endif
    }
}

} // namespace PpParallel
#endif
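The rank format produced above is just the inverse permutation of the order format: if order[i] names the node in sorted position i, then rank[order[i]] = i, which is exactly the res[temp[i]] = i step in getDegreeOrdering. A minimal standalone C sketch of that inversion with toy data; order_to_rank is a hypothetical helper, not part of degree.h:

/* Invert a permutation in parallel: rank[order[i]] = i. */
#include <stdio.h>

static void order_to_rank(const int *order, int *rank, int n)
{
    #pragma omp parallel for schedule(static, 64)
    for (int i = 0; i < n; i++)
        rank[order[i]] = i;   /* node order[i] sits at sorted position i */
}

int main(void)
{
    int order[] = {3, 0, 4, 1, 2};   /* toy: nodes sorted by ascending degree */
    int rank[5];
    order_to_rank(order, rank, 5);
    for (int i = 0; i < 5; i++)
        printf("node %d has rank %d\n", i, rank[i]);
    return 0;
}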
ranksort.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void print_array(int* array, int n) {
    for (int i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

int* fisher_yates_shuffle(int* random_array, int n) {
    int last_index = n;
    while (last_index-- > 1) {
        // Pick from [0, last_index] inclusive; excluding last_index itself
        // would give Sattolo's algorithm (cyclic permutations only), not
        // a uniform Fisher-Yates shuffle.
        int swap_index = rand() % (last_index + 1);
        int last_value = random_array[last_index];
        random_array[last_index] = random_array[swap_index];
        random_array[swap_index] = last_value;
    }
    return random_array;
}

int* gen_random_unsorted_array(int n) {
    int* random_array = malloc(sizeof(int) * n);
    // Fill with the distinct keys 0..n-1, then shuffle them.
    for (int i = 0; i < n; i++)
        random_array[i] = i;
    // print_array(random_array, n);
    return fisher_yates_shuffle(random_array, n);
}

int find_rank(int* array, int n, int x) {
    int rank = 0;
    for (int i = 0; i < n; i++)
        if (array[i] < x)
            rank++;
    return rank;
}

int* ranksort(int* array, int n) {
    int* sorted_array = malloc(sizeof(int) * n);
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < n; i++) {
        int value = array[i];
        int rank = find_rank(array, n, value);
        sorted_array[rank] = value;
    }
    return sorted_array;
}

int main(int argc, char** argv) {
    srand(time(NULL));
    int n;
    if (argc < 2) {
        puts("Pass the value of n as a command-line argument");
        printf("%s <n>\n", argv[0]);
        return 1;
    }
    n = atoi(argv[1]);
    int *random_array = gen_random_unsorted_array(n);
    // print_array(random_array, n);
    double start_time = omp_get_wtime();
    int* sorted_array = ranksort(random_array, n);
    double end_time = omp_get_wtime();
    // print_array(sorted_array, n);
    printf("Took %.4f seconds to sort %d numbers\n", end_time - start_time, n);
    free(random_array);
    free(sorted_array);
    return 0;
}
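One property worth noting: ranksort above is only correct for distinct keys, which gen_random_unsorted_array guarantees by shuffling a permutation of 0..n-1. With duplicates, equal elements would compute the same rank and collide in sorted_array. A small sketch of the usual fix, breaking ties by index; rank_with_ties is illustrative and not part of ranksort.c:

/* Rank sort with duplicate keys: count equal elements at smaller indices
 * as "smaller", so every element gets a unique rank. */
#include <stdio.h>

static int rank_with_ties(const int *a, int n, int i)
{
    int r = 0;
    for (int j = 0; j < n; j++)
        if (a[j] < a[i] || (a[j] == a[i] && j < i))  /* tie-break by index */
            r++;
    return r;
}

int main(void)
{
    int a[] = {5, 1, 5, 2}, out[4];
    for (int i = 0; i < 4; i++)
        out[rank_with_ties(a, 4, i)] = a[i];
    for (int i = 0; i < 4; i++)
        printf("%d ", out[i]);   /* prints: 1 2 5 5 */
    printf("\n");
    return 0;
}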
nr_numint.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <assert.h> #include "config.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" #define BOXSIZE 56 int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice, int *ao_loc) { if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) { return 0; } const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int bas_id; int box_id = 0; int bound = BOXSIZE; int has0 = 0; empty[box_id] = 1; for (bas_id = sh0; bas_id < sh1; bas_id++) { empty[box_id] &= !non0table[bas_id]; if (ao_loc[bas_id] == bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = 1; } else if (ao_loc[bas_id] > bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = !non0table[bas_id]; } } return has0; } static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int bgrids, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; double beta = 0; if (has0) { int box_id, blen, i, j; size_t b0; for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen, &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc, &beta, vm, &ngrids); beta = 1.0; } } if (beta == 0) { // all empty for (i = 0; i < nocc; i++) { for (j = 0; j < bgrids; j++) { vm[i*ngrids+j] = 0; } } } } else { dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao, &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids); } } /* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int nbas, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; #pragma omp parallel { int ip, ib; #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE), non0table+ib*nbas, shls_slice, ao_loc); } } } /* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */ static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int bgrids, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (has0) { int ib, jb, leni, lenj; int j1 = nbox; size_t b0i, b0j; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); if (hermi) { j1 = ib + 1; } for (jb = 0; jb < j1; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); dgemm_(&TRANS_T, 
&TRANS_N, &lenj, &leni, &bgrids, &D1, ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids, &D1, vv+b0i*nao+b0j, &nao); } } } } } else { dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids, &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao); } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int nbas, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; size_t Nao = nao; NPdset0(vv, Nao * Nao); #pragma omp parallel { int ip, ib; double *v_priv = calloc(nao*nao+2, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_ao(v_priv, ao1+ip, ao2+ip, nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi, non0table+ib*nbas, shls_slice, ao_loc); } #pragma omp critical { for (ip = 0; ip < nao*nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } if (hermi != 0) { NPdsymm_triu(nao, vv, hermi); } } // 'nip,np->ip' void VXC_dscale_ao(double *aow, double *ao, double *wv, int comp, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; size_t ao_size = nao * Ngrids; int i, j, ic; double *pao = ao; #pragma omp for schedule(static) for (i = 0; i < nao; i++) { pao = ao + i * Ngrids; for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] = pao[j] * wv[j]; } for (ic = 1; ic < comp; ic++) { for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j]; } } } } } // 'ip,ip->p' void VXC_dcontract_rho(double *rho, double *bra, double *ket, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; int nthread = omp_get_num_threads(); int blksize = MAX((Ngrids+nthread-1) / nthread, 1); int ib, b0, b1, i, j; #pragma omp for for (ib = 0; ib < nthread; ib++) { b0 = ib * blksize; b1 = MIN(b0 + blksize, ngrids); for (j = b0; j < b1; j++) { rho[j] = bra[j] * ket[j]; } for (i = 1; i < nao; i++) { for (j = b0; j < b1; j++) { rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j]; } } } } } void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec, double *vvcoords, double *coords, double *W0p, double *W0, double *K, double *Kp, double *RpW, int vvngrids, int ngrids) { #pragma omp parallel { double DX, DY, DZ, R2; double gp, g, gt, T, F, U, W; int i, j; #pragma omp for schedule(static) for (i = 0; i < ngrids; i++) { F = 0; U = 0; W = 0; for (j = 0; j < vvngrids; j++) { DX = vvcoords[j*3+0] - coords[i*3+0]; DY = vvcoords[j*3+1] - coords[i*3+1]; DZ = vvcoords[j*3+2] - coords[i*3+2]; R2 = DX*DX + DY*DY + DZ*DZ; gp = R2*W0p[j] + Kp[j]; g = R2*W0[i] + K[i]; gt = g + gp; T = RpW[j] / (g*gp*gt); F += T; T *= 1./g + 1./gt; U += T; W += T * R2; } Fvec[i] = F * -1.5; Uvec[i] = U; Wvec[i] = W; } } }
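The key trick in dot_ao_dm above is the running beta flag: the first non-empty block calls dgemm_ with beta = 0, which overwrites vm, and every later block accumulates with beta = 1, so vm never needs a separate zeroing pass unless all blocks are empty. A self-contained sketch of that pattern, with plain loops standing in for dgemm_ and toy sizes (NBLK, BS, and the data are illustrative, not from nr_numint.c):

/* The "beta trick": first non-empty block overwrites, later blocks add. */
#include <stdio.h>

#define NBLK 3
#define BS   2

int main(void)
{
    double x[NBLK * BS] = {1, 2, 0, 0, 3, 4};  /* block 1 is empty */
    char   empty[NBLK]  = {0, 1, 0};
    double acc = 0;
    double beta = 0;                 /* 0: overwrite, 1: accumulate */
    for (int b = 0; b < NBLK; b++) {
        if (empty[b]) continue;
        double partial = 0;
        for (int i = 0; i < BS; i++)
            partial += x[b * BS + i];
        acc = beta * acc + partial;  /* the role beta plays in dgemm_ */
        beta = 1;
    }
    printf("sum over non-empty blocks = %g\n", acc);  /* prints 10 */
    return 0;
}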
GB_binop__bget_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__bget_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__bget_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint64) // C=scalar+B GB (_bind1st__bget_uint64) // C=scalar+B' GB (_bind1st_tran__bget_uint64) // C=A+scalar GB (_bind2nd__bget_uint64) // C=A'+scalar GB (_bind2nd_tran__bget_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_BITGET (aij, bij, uint64_t, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT64 || GxB_NO_BGET_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bget_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bget_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
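The BITGET semantics assumed throughout GB_binop__bget_uint64.c, GB_BITGET (x, k, uint64_t, 64), plausibly follow MATLAB-style bitget: return bit k of x with a 1-based bit position, reading an out-of-range position as 0. The sketch below illustrates that assumed behavior; bitget64 is a stand-in, not the GraphBLAS macro itself:

/* Hedged sketch of MATLAB-style bitget on a 64-bit word. */
#include <stdio.h>
#include <stdint.h>

static uint64_t bitget64(uint64_t x, uint64_t k)
{
    if (k < 1 || k > 64) return 0;          /* out-of-range bit reads as 0 */
    return (x >> (k - 1)) & 1ULL;           /* k = 1 is the least significant bit */
}

int main(void)
{
    uint64_t x = 0xA5;                      /* binary 1010 0101 */
    for (uint64_t k = 1; k <= 8; k++)
        printf("bit %llu of 0xA5 = %llu\n",
               (unsigned long long) k,
               (unsigned long long) bitget64(x, k));
    return 0;
}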
turing.c
#include <stdio.h>
#include <stdlib.h>
#include "turing.h"
#include "common.h"

tTransTableItem * getTransition(tTransitions * t, int state, signed char symbol)
{
    if (state < t->states && symbol < t->symbols)
        return t->table + state*t->symbols + symbol;
    else {
        printf("Critical error!\nTrying to get state=%d, symbol=%d, but trans. table size has %d states and %d symbols\n",
               state, symbol, t->states, t->symbols);
        exit(-1);
    }
}

char * shifts[] = {"R", "L", "RR", "N"};
#pragma omp threadprivate(shifts)

// 'static' keeps this C99 inline function from producing an unresolved
// external reference when the compiler chooses not to inline it.
static inline char * shift2str(tShift shift)
{
    return shifts[shift];
}

void turing(tTape * tape, tTransitions * t, int max_steps, tStatus * status)
{
    signed char symbol;
    int head = 1; // Turing read/write head starts at the 2nd symbol (tapes must begin with a BLANK symbol)

    while (status->steps < max_steps && status->state < t->states) {
        tTransTableItem * trans;
        status->steps++;
        symbol = tape->content[head]; // symbol currently under the head
        trans = getTransition(t, status->state, symbol);
        status->state = trans->state;
        if (trans->symbol >= 0) {
            tape->content[head] = trans->symbol;
            status->writes++;
            if (head > status->head_max)
                status->head_max = head;
        }
        switch (trans->shift) {
            case L: head--; break;
            case R: head++; break;
            case RR: head += 2; break;
        }
        if (head < 0 || head >= TAPE_LEN) {
            if (log_level >= LOG_DEBUG_3)
                fprintf(stderr, "Head out of bounds!\n");
            status->error = ERR_BOUNDS;
            return;
        }
    } // while(...) - the main loop
}
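The transition table above is a flat 2-D array indexed as table[state * symbols + symbol], and the machine halts when a transition moves status->state past t->states. A toy standalone stepper showing just that layout; the Item struct and shift codes are local stand-ins inferred from the usage in turing.c (the real definitions live in turing.h):

/* Toy Turing stepper over a flat transition table. */
#include <stdio.h>

enum { SH_R, SH_L };                /* shift codes, illustrative only */
typedef struct { int state, symbol, shift; } Item;

int main(void)
{
    /* 1 state, 2 symbols: on 0 write 1 and stay in state 0; on 1 write 0
     * and go to state 1, which is out of range and therefore halts. */
    Item table[1 * 2] = {
        { 0, 1, SH_R },             /* (state 0, symbol 0) */
        { 1, 0, SH_R },             /* (state 0, symbol 1) */
    };
    int tape[8] = {0, 1, 0, 0}, head = 0, state = 0, states = 1;
    while (state < states && head < 8) {
        Item *t = &table[state * 2 + tape[head]];
        tape[head] = t->symbol;
        state = t->state;
        head++;                     /* only SH_R occurs in this toy machine */
    }
    for (int i = 0; i < 8; i++) printf("%d", tape[i]);
    printf("  (halted in state %d)\n", state);
    return 0;
}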
gemm_x_dia_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number *Y = &y[index2(cc, 0, ldy)]; const ALPHA_Number *X = &x[index2(cc, 0, ldx)]; for (ALPHA_INT r = 0; r < mat->rows; ++r) alpha_mul(Y[r],Y[r],beta); for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Number val; alpha_mul(val,alpha,mat->values[index2(di,ar,mat->lval)]); alpha_madde(Y[ar],val,X[ac]); } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
opencl_tc_fmt_plug.c
/* * TrueCrypt volume OpenCL support to John The Ripper (RIPEMD-160 only) * * Based on CPU format originally written by Alain Espinosa <alainesp at * gmail.com> in 2012. * Copyright (c) 2015, magnum * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if HAVE_OPENCL #define FMT_STRUCT fmt_ocl_tc #if FMT_EXTERNS_H extern struct fmt_main FMT_STRUCT; #elif FMT_REGISTERS_H john_register_one(&FMT_STRUCT); #else #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "options.h" #include "formats.h" #include "crc32.h" #include "johnswap.h" #include "aes.h" #include "pbkdf2_hmac_ripemd160.h" #include "loader.h" #include "common-opencl.h" #define FORMAT_LABEL "truecrypt-opencl" #define FORMAT_NAME "TrueCrypt AES256_XTS" #define ALGORITHM_NAME "RIPEMD160 OpenCL" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 /* 64 is the actual maximum used by Truecrypt software as of version 7.1a */ #define PLAINTEXT_LENGTH 64 #define MAX_CIPHERTEXT_LENGTH (512*2+32) #define SALT_SIZE sizeof(struct cust_salt) #define SALT_ALIGN 4 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define TAG_RIPEMD160 "truecrypt_RIPEMD_160$" #define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1) #define IS_RIPEMD160 2 #define MAX_PASSSZ 64 #define PASS_BUFSZ 256 #define KPOOL_SZ 64 #define MAX_KFILE_SZ 1048576 /* 1 MB */ #define MAX_KEYFILES 256 static unsigned char (*first_block_dec)[16]; unsigned char (*keyfiles_data)[MAX_KFILE_SZ]; int (*keyfiles_length); #define KEYLEN PLAINTEXT_LENGTH #define OUTLEN 64 #define SALTLEN 64 typedef struct { unsigned int length; unsigned char v[KEYLEN]; } pbkdf2_password; typedef struct { unsigned int v[(OUTLEN+3)/4]; } pbkdf2_hash; typedef struct { unsigned char salt[SALTLEN]; } pbkdf2_salt; struct cust_salt { unsigned char salt[64]; unsigned char bin[512-64]; int loop_inc; int num_iterations; int hash_type; int nkeyfiles; } *psalt; static struct fmt_tests tests_ripemd160[] = { {"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" }, 
{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" }, {NULL} }; static cl_int cl_error; static pbkdf2_password *inbuffer; static pbkdf2_hash *outbuffer; static pbkdf2_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(pbkdf2_password) * gws; outsize = sizeof(pbkdf2_hash) * gws; settingsize = sizeof(pbkdf2_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); first_block_dec = mem_calloc(gws, sizeof(*first_block_dec)); keyfiles_data = mem_calloc(MAX_KEYFILES, sizeof(*keyfiles_data)); keyfiles_length = mem_calloc(MAX_KEYFILES, sizeof(int)); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(first_block_dec); MEM_FREE(keyfiles_data); MEM_FREE(keyfiles_length); } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct 
fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_ripemd160_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "pbkdf2_ripemd160", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(pbkdf2_password), 0, db); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } } static int valid(char* ciphertext, struct fmt_main *self) { unsigned int i; char *p, *q; int nkeyfiles = -1; if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN)) return 0; /* handle 'chopped' .pot lines */ if (ldr_isa_pot_source(ciphertext)) return 1; ciphertext += TAG_RIPEMD160_LEN; p = ciphertext; q = strchr(p, '$'); if (!q) { /* no keyfiles */ if (strlen(ciphertext) != 512*2) return 0; } else { if (q - p != 512 * 2) return 0; /* check keyfile(s) */ p = q + 1; nkeyfiles = atoi(p); if (nkeyfiles > MAX_KEYFILES || nkeyfiles < 1) return 0; } for (i = 0; i < 512*2; i++) { if (atoi16l[ARCH_INDEX(ciphertext[i])] == 0x7F) return 0; } return 1; } static void set_salt(void *salt) { psalt = salt; memcpy((char*)currentsalt.salt, psalt->salt, SALTLEN); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } static void* get_salt(char *ciphertext) { static char buf[sizeof(struct cust_salt)+4]; struct cust_salt *s = (struct cust_salt*)mem_align(buf, 4); unsigned int i; char tpath[PATH_BUFFER_SIZE] = { 0 }; char *p, *q; int idx; FILE *fp; size_t sz; memset(s, 0, sizeof(struct cust_salt)); s->loop_inc = 1; ciphertext += TAG_RIPEMD160_LEN; s->hash_type = IS_RIPEMD160; s->num_iterations = 2000; // Convert the hexadecimal salt in binary for (i = 0; i < 64; i++) s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])]; for (; i < 512; i++) s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])]; p = ciphertext; q = strchr(p, '$'); if (!q) /* no keyfiles */ return s; // process keyfile(s) p = q + 1; s->nkeyfiles = atoi(p); for (idx = 0; idx < s->nkeyfiles; idx++) { p = strchr(p, '$') + 1; // at first filename q = strchr(p, '$'); if (!q) { // last file memset(tpath, 0, sizeof(tpath) - 1); strncpy(tpath, p, sizeof(tpath)); } else { memset(tpath, 0, sizeof(tpath) - 1); strncpy(tpath, p, q-p); } /* read this into keyfiles_data[idx] */ fp = fopen(tpath, "rb"); if (!fp) pexit("fopen %s", p); if (fseek(fp, 0L, SEEK_END) == -1) pexit("fseek"); sz = ftell(fp); if (fseek(fp, 0L, SEEK_SET) == -1) pexit("fseek"); if (fread(keyfiles_data[idx], 1, sz, fp) != sz) pexit("fread"); keyfiles_length[idx] = sz; fclose(fp); } return s; } static void AES_256_XTS_first_sector(const unsigned char *double_key, unsigned char *out, const unsigned char *data, unsigned len) { unsigned char tweak[16] = { 0 }; unsigned char buf[16]; int i, j, cnt; AES_KEY key1, key2; AES_set_decrypt_key(double_key, 256, &key1); AES_set_encrypt_key(&double_key[32], 256, &key2); // first aes tweak (we do it right over tweak AES_encrypt(tweak, tweak, &key2); cnt = len/16; for (j=0;;) { for (i = 0; i < 16; ++i) buf[i] = 
data[i]^tweak[i]; AES_decrypt(buf, out, &key1); for (i = 0; i < 16; ++i) out[i]^=tweak[i]; ++j; if (j == cnt) break; else { unsigned char Cin, Cout; unsigned x; Cin = 0; for (x = 0; x < 16; ++x) { Cout = (tweak[x] >> 7) & 1; tweak[x] = ((tweak[x] << 1) + Cin) & 0xFF; Cin = Cout; } if (Cout) tweak[0] ^= 135; //GF_128_FDBK; } data += 16; out += 16; } } static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles) { int pl, k; unsigned char *kpool; unsigned char *kdata; int kpool_idx; size_t i, kdata_sz; uint32_t crc; if (pass_memsz < MAX_PASSSZ) { error(); } pl = strlen((char*)pass); memset(pass+pl, 0, MAX_PASSSZ-pl); if ((kpool = mem_calloc(1, KPOOL_SZ)) == NULL) { error(); } for (k = 0; k < nkeyfiles; k++) { kpool_idx = 0; kdata_sz = keyfiles_length[k]; kdata = keyfiles_data[k]; crc = ~0U; for (i = 0; i < kdata_sz; i++) { crc = jtr_crc32(crc, kdata[i]); kpool[kpool_idx++] += (unsigned char)(crc >> 24); kpool[kpool_idx++] += (unsigned char)(crc >> 16); kpool[kpool_idx++] += (unsigned char)(crc >> 8); kpool[kpool_idx++] += (unsigned char)(crc); /* Wrap around */ if (kpool_idx == KPOOL_SZ) kpool_idx = 0; } } /* Apply keyfile pool to passphrase */ for (i = 0; i < KPOOL_SZ; i++) pass[i] += kpool[i]; MEM_FREE(kpool); return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { int i; const int count = *pcount; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (psalt->nkeyfiles) { #if _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { apply_keyfiles(inbuffer[i].v, 64, psalt->nkeyfiles); inbuffer[i].length = 64; } } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #if _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { AES_256_XTS_first_sector((unsigned char*)outbuffer[i].v, first_block_dec[i], psalt->bin, 16); } return count; } static int cmp_all(void* binary, int count) { int i; for (i = 0; i < count; ++i) { if (!memcmp(first_block_dec[i], "TRUE", 4)) return 1; } return 0; } static int cmp_one(void* binary, int index) { if (!memcmp(first_block_dec[index], "TRUE", 4)) return 1; return 0; } static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) { return given_crc32[0] == ((comp_crc32>>24)&0xFF) && given_crc32[1] == ((comp_crc32>>16)&0xFF) && given_crc32[2] == ((comp_crc32>> 8)&0xFF) && given_crc32[3] == ((comp_crc32>> 0)&0xFF); } static int cmp_exact(char *source, int idx) { unsigned char key[64]; unsigned char decr_header[512-64]; CRC32_t check_sum; int ksz = inbuffer[idx].length; memcpy(key, inbuffer[idx].v, inbuffer[idx].length); /* process keyfile(s) */ if (psalt->nkeyfiles) { apply_keyfiles(key, 64, psalt->nkeyfiles); ksz = 64; } pbkdf2_ripemd160(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0); AES_256_XTS_first_sector(key, decr_header, psalt->bin, 512-64); if (memcmp(decr_header, "TRUE", 4)) return 0; CRC32_Init(&check_sum); CRC32_Update(&check_sum, &decr_header[256-64], 256); if (!cmp_crc32s(&decr_header[8], ~check_sum)) return 0; 
CRC32_Init(&check_sum); CRC32_Update(&check_sum, decr_header, 256-64-4); if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum)) return 0; return 1; } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int salt_hash(void *salt) { unsigned v=0, i; struct cust_salt *psalt = (struct cust_salt*)salt; for (i = 0; i < 64; ++i) { v *= 11; v += psalt->salt[i]; } return v & (SALT_HASH_SIZE - 1); } struct fmt_main FMT_STRUCT = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, tests_ripemd160 }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
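Between consecutive 16-byte blocks, AES_256_XTS_first_sector above advances the tweak by a byte-wise carry-propagating left shift, folding the final carry back into byte 0 with 135: multiplication by x in GF(2^128) with the XTS feedback polynomial 0x87, in little-endian byte order. The update isolated as a sketch (xts_tweak_double is a name chosen here, not from the plugin):

/* One XTS tweak update: tweak <- tweak * x in GF(2^128), little-endian. */
#include <stdio.h>

static void xts_tweak_double(unsigned char t[16])
{
    unsigned char cin = 0;
    for (int i = 0; i < 16; i++) {
        unsigned char cout = (t[i] >> 7) & 1;        /* bit shifted out */
        t[i] = (unsigned char)((t[i] << 1) | cin);   /* shift in prior carry */
        cin = cout;
    }
    if (cin)
        t[0] ^= 135;   /* 0x87: GF(2^128) feedback polynomial */
}

int main(void)
{
    unsigned char t[16] = {0x80, 0, 0, 0, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0x80};
    xts_tweak_double(t);
    for (int i = 0; i < 16; i++) printf("%02x", t[i]);
    printf("\n");   /* byte 0's high bit carries into byte 1; byte 15's wraps into 0x87 at byte 0 */
    return 0;
}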
GB_binop__pow_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_fc64 // A.*B function (eWiseMult): GB_AemultB__pow_fc64 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__pow_fc64 // C+=b function (dense accum): GB_Cdense_accumb__pow_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fc64 // C=scalar+B GB_bind1st__pow_fc64 // C=scalar+B' GB_bind1st_tran__pow_fc64 // C=A+scalar GB_bind2nd__pow_fc64 // C=A'+scalar GB_bind2nd_tran__pow_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_cpow (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_cpow (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__pow_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t
*GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_cpow (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_cpow (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (x, aij) ; \ } GrB_Info 
GB_bind1st_tran__pow_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
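The BinaryOp in GB_binop__pow_fc64.c is cij = GB_cpow (aij, bij) on GxB_FC64_t, i.e. double complex. A plain C99 cpow stand-in for the common path is sketched below; the real GB_cpow presumably also patches special cases (infinities, NaNs, integer exponents) where cpow alone does not match the desired semantics, so treat this as an assumption rather than the library's definition:

/* Complex power via C99 cpow: the common path of a pow binop on FC64. */
#include <stdio.h>
#include <complex.h>

int main(void)
{
    double complex a = 1.0 + 1.0 * I;   /* aij */
    double complex b = 2.0 + 0.0 * I;   /* bij */
    double complex c = cpow(a, b);      /* (1+i)^2 is about 2i */
    printf("(%g, %g)\n", creal(c), cimag(c));
    return 0;
}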
dft_ground_state.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file dft_ground_state.h * * \brief Contains definition and partial implementation of sirius::DFT_ground_state class. */ #ifndef __DFT_GROUND_STATE_H__ #define __DFT_GROUND_STATE_H__ #include "potential.h" #include "density.h" #include "k_point_set.h" #include "Geometry/force.hpp" #include "Geometry/stress.hpp" #include "json.hpp" #include "hubbard.hpp" using json = nlohmann::json; namespace sirius { class DFT_ground_state { private: Simulation_context& ctx_; Unit_cell& unit_cell_; Hamiltonian &H_; Potential& potential_; Density& density_; K_point_set& kset_; Band band_; double ewald_energy_{0}; /// Compute the ion-ion electrostatic energy using Ewald method. /** The following contribution (per unit cell) to the total energy has to be computed: * \f[ * E^{ion-ion} = \frac{1}{N} \frac{1}{2} \sum_{i \neq j} \frac{Z_i Z_j}{|{\bf r}_i - {\bf r}_j|} = * \frac{1}{2} \sideset{}{'} \sum_{\alpha \beta {\bf T}} \frac{Z_{\alpha} Z_{\beta}}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} * \f] * where \f$ N \f$ is the number of unit cells in the crystal. * Following the idea of Ewald the Coulomb interaction is split into two terms: * \f[ * \frac{1}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} = * \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|)}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} + * \frac{{\rm erfc}(\sqrt{\lambda} |{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|)}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} * \f] * Second term is computed directly. First term is computed in the reciprocal space. 
Remembering that * \f[ * \frac{1}{\Omega} \sum_{\bf G} e^{i{\bf Gr}} = \sum_{\bf T} \delta({\bf r - T}) * \f] * we rewrite the first term as * \f[ * \frac{1}{2} \sideset{}{'} \sum_{\alpha \beta {\bf T}} Z_{\alpha} Z_{\beta} * \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|)}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} = * \frac{1}{2} \sum_{\alpha \beta {\bf T}} Z_{\alpha} Z_{\beta} \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|)}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} - * \frac{1}{2} \sum_{\alpha} Z_{\alpha}^2 2 \sqrt{\frac{\lambda}{\pi}} = \\ * \frac{1}{2} \sum_{\alpha \beta} Z_{\alpha} Z_{\beta} \frac{1}{\Omega} \sum_{\bf G} \int e^{i{\bf Gr}} \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}_{\alpha\beta} + {\bf r}|)}{|{\bf r}_{\alpha\beta} + {\bf r}|} d{\bf r} - * \sum_{\alpha} Z_{\alpha}^2 \sqrt{\frac{\lambda}{\pi}} * \f] * The integral is computed using the \f$ \ell=0 \f$ term of the spherical expansion of the plane-wave: * \f[ * \int e^{i{\bf Gr}} \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}_{\alpha\beta} + {\bf r}|)}{|{\bf r}_{\alpha\beta} + {\bf r}|} d{\bf r} = * \int e^{-i{\bf r}_{\alpha \beta}{\bf G}} e^{i{\bf Gr}} \frac{{\rm erf}(\sqrt{\lambda} |{\bf r}|)}{|{\bf r}|} d{\bf r} = * e^{-i{\bf r}_{\alpha \beta}{\bf G}} 4 \pi \int_0^{\infty} \frac{\sin({G r})}{G} {\rm erf}(\sqrt{\lambda} r ) dr * \f] * We will split the integral into two parts: * \f[ * \int_0^{\infty} \sin({G r}) {\rm erf}(\sqrt{\lambda} r ) dr = \int_0^{b} \sin({G r}) {\rm erf}(\sqrt{\lambda} r ) dr + * \int_b^{\infty} \sin({G r}) dr = \frac{1}{G} e^{-\frac{G^2}{4 \lambda}} * \f] * where \f$ b \f$ is sufficiently large. To reproduce in Mathematica: \verbatim Limit[Limit[ Integrate[Sin[g*x]*Erf[Sqrt[nu] * x], {x, 0, b}, Assumptions -> {nu > 0, g >= 0, b > 0}] + Integrate[Sin[g*(x + I*a)], {x, b, \[Infinity]}, Assumptions -> {a > 0, nu > 0, g >= 0, b > 0}], a -> 0], b -> \[Infinity], Assumptions -> {nu > 0, g >= 0}] \endverbatim * The first term of the Ewald sum thus becomes: * \f[ * \frac{2 \pi}{\Omega} \sum_{{\bf G}} \frac{e^{-\frac{G^2}{4 \lambda}}}{G^2} \Big| \sum_{\alpha} Z_{\alpha} e^{-i{\bf r}_{\alpha}{\bf G}} \Big|^2 - * \sum_{\alpha} Z_{\alpha}^2 \sqrt{\frac{\lambda}{\pi}} * \f] * For \f$ G=0 \f$ the following is done: * \f[ * \frac{e^{-\frac{G^2}{4 \lambda}}}{G^2} \approx \frac{1}{G^2}-\frac{1}{4 \lambda } * \f] * The term \f$ \frac{1}{G^2} \f$ is compensated together with the corresponding Hartree terms in electron-electron and * electron-ion interactions (cell should be neutral) and we are left with the following contribution: * \f[ * -\frac{2\pi}{\Omega}\frac{N_{el}^2}{4 \lambda} * \f] * Final expression for the Ewald energy: * \f[ * E^{ion-ion} = \frac{1}{2} \sideset{}{'} \sum_{\alpha \beta {\bf T}} Z_{\alpha} Z_{\beta} * \frac{{\rm erfc}(\sqrt{\lambda} |{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|)}{|{\bf r}_{\alpha} - {\bf r}_{\beta} + {\bf T}|} + * \frac{2 \pi}{\Omega} \sum_{{\bf G}\neq 0} \frac{e^{-\frac{G^2}{4 \lambda}}}{G^2} \Big| \sum_{\alpha} Z_{\alpha} e^{-i{\bf r}_{\alpha}{\bf G}} \Big|^2 - * \sum_{\alpha} Z_{\alpha}^2 \sqrt{\frac{\lambda}{\pi}} - \frac{2\pi}{\Omega}\frac{N_{el}^2}{4 \lambda} * \f] */ double ewald_energy(); /// Compute approximate atomic magnetic moments in case of PW-PP.
mdarray<double, 2> compute_atomic_mag_mom() const { PROFILE("sirius::DFT_ground_state::compute_atomic_mag_mom"); /* radius of spheres around each atom where "atomic" magnetic moment is calculated */ double const R = 2.0; mdarray<double, 2> mmom(3, unit_cell_.num_atoms()); mmom.zero(); #pragma omp parallel for for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { for (int j0 = 0; j0 < ctx_.fft().grid().size(0); j0++) { for (int j1 = 0; j1 < ctx_.fft().grid().size(1); j1++) { for (int j2 = 0; j2 < ctx_.fft().local_size_z(); j2++) { /* get real space fractional coordinate */ auto v0 = vector3d<double>(double(j0) / ctx_.fft().grid().size(0), double(j1) / ctx_.fft().grid().size(1), double(ctx_.fft().offset_z() + j2) / ctx_.fft().grid().size(2)); /* index of real space point */ int ir = ctx_.fft().grid().index_by_coord(j0, j1, j2); for (int t0 = -1; t0 <= 1; t0++) { for (int t1 = -1; t1 <= 1; t1++) { for (int t2 = -1; t2 <= 1; t2++) { vector3d<double> v1 = v0 - (unit_cell_.atom(ia).position() + vector3d<double>(t0, t1, t2)); auto r = unit_cell_.get_cartesian_coordinates(v1); auto a = r.length(); if (a <= R) { for (int j = 0; j < ctx_.num_mag_dims(); j++) { mmom(j, ia) += density_.magnetization(j).f_rg(ir); } } } } } } } } for (int j: {0, 1, 2}) { mmom(j, ia) *= (unit_cell_.omega() / ctx_.fft().size()); } } ctx_.fft().comm().allreduce(&mmom(0, 0), static_cast<int>(mmom.size())); return std::move(mmom); } public: DFT_ground_state(Simulation_context& ctx__, Hamiltonian& H__, Density& density__, K_point_set& kset__) : ctx_(ctx__) , unit_cell_(ctx__.unit_cell()) , H_(H__) , potential_(H__.potential()) , density_(density__) , kset_(kset__) , band_(ctx_) { if (!ctx_.full_potential()) { ewald_energy_ = ewald_energy(); } } int find(double potential_tol, double energy_tol, int num_dft_iter, bool write_state); void print_info(); void print_magnetic_moment(); /// Return nucleus energy in the electrostatic field. /** Compute energy of nucleus in the electrostatic potential generated by the total (electrons + nuclei) * charge density. Diverging self-interaction term z*z/|r=0| is excluded. */ double energy_enuc() const { double enuc{0}; if (ctx_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); int zn = unit_cell_.atom(ia).zn(); enuc -= 0.5 * zn * potential_.vh_el(ia); // * y00; } ctx_.comm().allreduce(&enuc, 1); } return enuc; } /// Return eigen-value sum of core states. 
double core_eval_sum() const
{
    double sum{0};
    for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
        sum += unit_cell_.atom_symmetry_class(ic).core_eval_sum() *
               unit_cell_.atom_symmetry_class(ic).num_atoms();
    }
    return sum;
}

double energy_vha()
{
    return potential_.energy_vha();
}

double energy_vxc()
{
    return potential_.energy_vxc(density_);
}

double energy_exc()
{
    return potential_.energy_exc(density_);
}

double energy_bxc()
{
    double ebxc{0};
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        ebxc += density_.magnetization(j).inner(potential_.effective_magnetic_field(j));
    }
    return ebxc;
}

double energy_veff()
{
    return density_.rho().inner(potential_.effective_potential());
}

double energy_vloc()
{
    return potential_.local_potential().inner(density_.rho());
}

/// Full eigen-value sum (core + valence)
double eval_sum()
{
    return (core_eval_sum() + kset_.valence_eval_sum());
}

/// Kinetic energy
/** Computed by subtracting the potential contributions from the eigen-value sum:
 *  \f$ E^{kin} = \sum_{i} \varepsilon_i - \langle \rho | V^{eff} \rangle -
 *  \langle {\bf m} | {\bf B}^{XC} \rangle \f$, where the sum runs over core and valence states. */
double energy_kin()
{
    return (eval_sum() - energy_veff() - energy_bxc());
}

double energy_ewald() const
{
    return ewald_energy_;
}

/// Total energy of the electronic subsystem.
/** From the definition of the density functional we have:
 * \f[
 * E[\rho] = T[\rho] + E^{H}[\rho] + E^{XC}[\rho] + E^{ext}[\rho]
 * \f]
 * where \f$ T[\rho] \f$ is the kinetic energy, \f$ E^{H}[\rho] \f$ - electrostatic energy of
 * electron-electron density interaction, \f$ E^{XC}[\rho] \f$ - exchange-correlation energy
 * and \f$ E^{ext}[\rho] \f$ - energy in the external field of nuclei.
 *
 * Electrostatic and external field energies are grouped in the following way:
 * \f[
 * \frac{1}{2} \int \int \frac{\rho({\bf r})\rho({\bf r'}) d{\bf r} d{\bf r'}}{|{\bf r} - {\bf r'}|} +
 * \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r} = \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} +
 * \frac{1}{2} \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r}
 * \f]
 * Here \f$ V^{H}({\bf r}) \f$ is the total (electron + nuclei) electrostatic potential returned by the
 * Poisson solver.
Next we transform the remaining term: * \f[ * \frac{1}{2} \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r} = * \frac{1}{2} \int \int \frac{\rho({\bf r})\rho^{nuc}({\bf r'}) d{\bf r} d{\bf r'}}{|{\bf r} - {\bf r'}|} = * \frac{1}{2} \int V^{H,el}({\bf r}) \rho^{nuc}({\bf r}) d{\bf r} * \f] * * Total energy in PW-PP method has the following expression: * \f[ * E_{tot} = \sum_{i} f_i \langle \psi_i | \Big( \hat T + \sum_{\xi \xi'} |\beta_{\xi} \rangle D_{\xi \xi'}^{ion} \langle \beta_{\xi'} |\Big) | \psi_i \rangle + * \int V^{ion}({\bf r})\rho({\bf r})d{\bf r} + \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} + E^{XC}[\rho + \rho_{core}] * \f] * The following rearrangement is performed: * \f[ * \int V^{ion}({\bf r})\rho({\bf r})d{\bf r} + \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} = \\ * \int V^{ion}({\bf r})\rho({\bf r})d{\bf r} + \int V^{H}({\bf r})\rho({\bf r})d{\bf r} + \int V^{XC}({\bf r})\rho({\bf r})d{\bf r} - * \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} - \int V^{XC}({\bf r})\rho({\bf r})d{\bf r} = \\ * \int V^{eff}({\bf r})\rho({\bf r})d{\bf r} - \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} - \int V^{XC}({\bf r})\rho({\bf r})d{\bf r} = \\ * \int V^{eff}({\bf r})\rho^{ps}({\bf r})d{\bf r} + \int V^{eff}({\bf r})\rho^{aug}({\bf r})d{\bf r} - \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} - \int V^{XC}({\bf r})\rho({\bf r})d{\bf r} * \f] * Where * \f[ * V^{eff}({\bf r}) = V^{H}({\bf r}) + V^{XC}({\bf r}) + V^{ion}({\bf r}) * \f] * Next, we use the definition of \f$ \rho^{aug} \f$: * \f[ * \int V^{eff}({\bf r})\rho^{aug}({\bf r})d{\bf r} = \int V^{eff}({\bf r}) \sum_{i}\sum_{\xi \xi'} f_i \langle \psi_i | \beta_{\xi} \rangle Q_{\xi \xi'}({\bf r}) \langle \beta_{\xi'} | \psi_i \rangle d{\bf r} = * \sum_{i}\sum_{\xi \xi'} f_i \langle \psi_i | \beta_{\xi} \rangle D_{\xi \xi'}^{aug} \langle \beta_{\xi'} | \psi_i \rangle * \f] * Now we can rewrite the total energy expression: * \f[ * E_{tot} = \sum_{i} f_i \langle \psi_i | \Big( \hat T + \sum_{\xi \xi'} |\beta_{\xi} \rangle D_{\xi \xi'}^{ion} + D_{\xi \xi'}^{aug} \langle \beta_{\xi'} |\Big) | \psi_i \rangle + * \int V^{eff}({\bf r})\rho^{ps}({\bf r})d{\bf r} - \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} - \int V^{XC}({\bf r})\rho({\bf r}) d{\bf r} + E^{XC}[\rho + \rho_{core}] * \f] * From the Kohn-Sham equations * \f[ * \Big( \hat T + \sum_{\xi \xi'} |\beta_{\xi} \rangle D_{\xi \xi'}^{ion} + D_{\xi \xi'}^{aug} \langle \beta_{\xi'}| + \hat V^{eff} \Big) | \psi_i \rangle = \varepsilon_i \Big( 1+\hat S \Big) |\psi_i \rangle * \f] * we can extract the sum * \f[ * \sum_{i} f_i \langle \psi_i | \Big( \hat T + \sum_{\xi \xi'} |\beta_{\xi} \rangle D_{\xi \xi'}^{ion} + D_{\xi \xi'}^{aug} \langle \beta_{\xi'} |\Big) | \psi_i \rangle = * \sum_{i} f_i \varepsilon_i - \int V^{eff}({\bf r}) \rho^{ps}({\bf r}) d{\bf r} * \f] * Combining all together we obtain the following expression for total energy: * \f[ * E_{tot} = \sum_{i} f_i \varepsilon_i - \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} - \int V^{XC}({\bf r})\rho({\bf r}) d{\bf r} + E^{XC}[\rho + \rho_{core}] * \f] */ double total_energy() { double tot_en{0}; switch (ctx_.esm_type()) { case electronic_structure_method_t::full_potential_lapwlo: { tot_en = (energy_kin() + energy_exc() + 0.5 * energy_vha() + energy_enuc()); break; } case electronic_structure_method_t::pseudopotential: { //tot_en = (kset_.valence_eval_sum() - energy_veff() + energy_vloc() - potential_.PAW_one_elec_energy()) + // 0.5 * energy_vha() + energy_exc() + 
potential_.PAW_total_energy() + ewald_energy_; tot_en = (kset_.valence_eval_sum() - energy_vxc() - potential_.PAW_one_elec_energy()) - 0.5 * energy_vha() + energy_exc() + potential_.PAW_total_energy() + ewald_energy_; break; } } if (ctx_.hubbard_correction()) { tot_en += H_.U().hubbard_energy(); } return tot_en; } void symmetrize(Periodic_function<double>* f__, Periodic_function<double>* gz__, Periodic_function<double>* gx__, Periodic_function<double>* gy__) { PROFILE("sirius::DFT_ground_state::symmetrize"); auto& comm = ctx_.comm(); auto& remap_gvec = ctx_.remap_gvec(); if (ctx_.control().print_hash_) { auto h = f__->hash_f_pw(); if (ctx_.comm().rank() == 0) { print_hash("f_unsymmetrized(G)", h); } } unit_cell_.symmetry().symmetrize_function(&f__->f_pw_local(0), remap_gvec, ctx_.sym_phase_factors()); if (ctx_.control().print_hash_) { auto h = f__->hash_f_pw(); if (ctx_.comm().rank() == 0) { print_hash("f_symmetrized(G)", h); } } /* symmetrize PW components */ switch (ctx_.num_mag_dims()) { case 1: { unit_cell_.symmetry().symmetrize_vector_function(&gz__->f_pw_local(0), remap_gvec, ctx_.sym_phase_factors()); break; } case 3: { if (ctx_.control().print_hash_) { auto h1 = gx__->hash_f_pw(); auto h2 = gy__->hash_f_pw(); auto h3 = gz__->hash_f_pw(); if (ctx_.comm().rank() == 0) { print_hash("fx_unsymmetrized(G)", h1); print_hash("fy_unsymmetrized(G)", h2); print_hash("fz_unsymmetrized(G)", h3); } } unit_cell_.symmetry().symmetrize_vector_function(&gx__->f_pw_local(0), &gy__->f_pw_local(0), &gz__->f_pw_local(0), remap_gvec, ctx_.sym_phase_factors()); if (ctx_.control().print_hash_) { auto h1 = gx__->hash_f_pw(); auto h2 = gy__->hash_f_pw(); auto h3 = gz__->hash_f_pw(); if (ctx_.comm().rank() == 0) { print_hash("fx_symmetrized(G)", h1); print_hash("fy_symmetrized(G)", h2); print_hash("fz_symmetrized(G)", h3); } } break; } } if (ctx_.full_potential()) { /* symmetrize MT components */ unit_cell_.symmetry().symmetrize_function(f__->f_mt(), comm); switch (ctx_.num_mag_dims()) { case 1: { unit_cell_.symmetry().symmetrize_vector_function(gz__->f_mt(), comm); break; } case 3: { unit_cell_.symmetry().symmetrize_vector_function(gx__->f_mt(), gy__->f_mt(), gz__->f_mt(), comm); break; } } } } Band & band() { return band_; } json serialize() { json dict; dict["mpi_grid"] = ctx_.mpi_grid_dims(); std::vector<int> fftgrid(3); for (int i = 0; i < 3; i++) { fftgrid[i] = ctx_.fft().grid().size(i); } dict["fft_grid"] = fftgrid; if (!ctx_.full_potential()) { for (int i = 0; i < 3; i++) { fftgrid[i] = ctx_.fft_coarse().grid().size(i); } dict["fft_coarse_grid"] = fftgrid; } dict["num_fv_states"] = ctx_.num_fv_states(); dict["num_bands"] = ctx_.num_bands(); dict["aw_cutoff"] = ctx_.aw_cutoff(); dict["pw_cutoff"] = ctx_.pw_cutoff(); dict["omega"] = ctx_.unit_cell().omega(); dict["chemical_formula"] = ctx_.unit_cell().chemical_formula(); dict["num_atoms"] = ctx_.unit_cell().num_atoms(); dict["energy"] = json::object(); dict["energy"]["total"] = total_energy(); dict["energy"]["enuc"] = energy_enuc(); dict["energy"]["core_eval_sum"] = core_eval_sum(); dict["energy"]["vha"] = energy_vha(); dict["energy"]["vxc"] = energy_vxc(); dict["energy"]["exc"] = energy_exc(); dict["energy"]["bxc"] = energy_bxc(); dict["energy"]["veff"] = energy_veff(); dict["energy"]["eval_sum"] = eval_sum(); dict["energy"]["kin"] = energy_kin(); dict["energy"]["ewald"] = energy_ewald(); dict["efermi"] = kset_.energy_fermi(); dict["band_gap"] = kset_.band_gap(); dict["core_leakage"] = density_.core_leakage(); return std::move(dict); } }; inline double 
DFT_ground_state::ewald_energy()
{
    PROFILE("sirius::DFT_ground_state::ewald_energy");

    double alpha = 1.5;

    double ewald_g = 0;

    #pragma omp parallel
    {
        double ewald_g_pt = 0;

        #pragma omp for
        for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) {
            int ig = ctx_.gvec().offset() + igloc;
            if (!ig) {
                continue;
            }

            double g2 = std::pow(ctx_.gvec().gvec_len(ig), 2);

            double_complex rho(0, 0);

            for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
                rho += ctx_.gvec_phase_factor(ig, ia) * static_cast<double>(unit_cell_.atom(ia).zn());
            }

            ewald_g_pt += std::pow(std::abs(rho), 2) * std::exp(-g2 / 4 / alpha) / g2;
        }

        #pragma omp critical
        ewald_g += ewald_g_pt;
    }
    ctx_.comm().allreduce(&ewald_g, 1);
    if (ctx_.gvec().reduced()) {
        ewald_g *= 2;
    }
    /* remaining G=0 contribution */
    ewald_g -= std::pow(unit_cell_.num_electrons(), 2) / alpha / 4;
    ewald_g *= (twopi / unit_cell_.omega());

    /* remove self-interaction */
    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        ewald_g -= std::sqrt(alpha / pi) * std::pow(unit_cell_.atom(ia).zn(), 2);
    }

    double ewald_r = 0;
    #pragma omp parallel
    {
        double ewald_r_pt = 0;

        #pragma omp for
        for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
            for (int i = 1; i < unit_cell_.num_nearest_neighbours(ia); i++) {
                int ja = unit_cell_.nearest_neighbour(i, ia).atom_id;
                double d = unit_cell_.nearest_neighbour(i, ia).distance;
                ewald_r_pt += 0.5 * unit_cell_.atom(ia).zn() * unit_cell_.atom(ja).zn() *
                              gsl_sf_erfc(std::sqrt(alpha) * d) / d;
            }
        }

        #pragma omp critical
        ewald_r += ewald_r_pt;
    }

    return (ewald_g + ewald_r);
}

inline int DFT_ground_state::find(double potential_tol, double energy_tol, int num_dft_iter, bool write_state)
{
    PROFILE("sirius::DFT_ground_state::scf_loop");

    double eold{0}, rms{0};

    if (ctx_.full_potential()) {
        potential_.mixer_init();
    } else {
        density_.mixer_init();
    }

    int result{-1};

    if (ctx_.hubbard_correction()) {
        H_.U().hubbard_compute_occupation_numbers(kset_);
        H_.U().calculate_hubbard_potential_and_energy();
    }

    for (int iter = 0; iter < num_dft_iter; iter++) {
        sddk::timer t1("sirius::DFT_ground_state::scf_loop|iteration");

        if (ctx_.comm().rank() == 0) {
            printf("\n");
            printf("+------------------------------+\n");
            printf("| SCF iteration %3i out of %3i |\n", iter, num_dft_iter);
            printf("+------------------------------+\n");
        }

        /* find new wave-functions */
        band_.solve_for_kset(kset_, H_, true);
        /* find band occupancies */
        kset_.find_band_occupancies();
        /* generate new density from the occupied wave-functions */
        density_.generate(kset_);

        /* symmetrize density and magnetization */
        if (ctx_.use_symmetry()) {
            symmetrize(&density_.rho(), &density_.magnetization(0), &density_.magnetization(1),
                       &density_.magnetization(2));
            if (ctx_.esm_type() == electronic_structure_method_t::pseudopotential) {
                density_.symmetrize_density_matrix();
            }
        }

        /* set new tolerance of iterative solver */
        if (!ctx_.full_potential()) {
            rms = density_.mix();
            double tol = std::max(1e-12, 0.1 * density_.dr2() / ctx_.unit_cell().num_valence_electrons());
            /* print dr2 of mixer and current iterative solver tolerance */
            if (ctx_.comm().rank() == 0) {
                printf("dr2: %18.12E, tol: %18.12E\n", density_.dr2(), tol);
            }
            ctx_.set_iterative_solver_tolerance(std::min(ctx_.iterative_solver_tolerance(), tol));
        }

        if (!ctx_.full_potential()) {
            density_.generate_paw_loc_density();
        }

        /* transform density to real space after mixing and symmetrization */
        density_.fft_transform(1);

        /* check number of electrons */
        density_.check_num_electrons();

        /* compute new potential */
        potential_.generate(density_);

        /* symmetrize potential and effective
magnetic field */ if (ctx_.use_symmetry()) { symmetrize(potential_.effective_potential(), potential_.effective_magnetic_field(0), potential_.effective_magnetic_field(1), potential_.effective_magnetic_field(2)); } /* transform potential to real space after symmetrization */ potential_.fft_transform(1); /* compute new total energy for a new density */ double etot = total_energy(); if (ctx_.full_potential()) { rms = potential_.mix(); double tol = std::max(1e-12, 0.001 * rms); //if (ctx_.comm().rank() == 0) { // printf("tol: %18.10f\n", tol); //} ctx_.set_iterative_solver_tolerance(std::min(ctx_.iterative_solver_tolerance(), tol)); } /* write some information */ print_info(); if (ctx_.comm().rank() == 0) { printf("iteration : %3i, RMS %18.12E, energy difference : %18.12E\n", iter, rms, etot - eold); } if (ctx_.full_potential()) { if (std::abs(eold - etot) < energy_tol && rms < potential_tol) { result = iter; break; } } if (!ctx_.full_potential()) { if (std::abs(eold - etot) < energy_tol && density_.dr2() < potential_tol) { if (ctx_.comm().rank() == 0) { printf("\n"); printf("converged after %i SCF iterations!\n", iter + 1); printf("energy difference : %18.12E < %18.12E\n", std::abs(eold - etot), energy_tol); printf("density difference : %18.12E < %18.12E\n", density_.dr2(), potential_tol); } result = iter; break; } } /* Compute the hubbard correction */ if(ctx_.hubbard_correction()) { H_.U().hubbard_compute_occupation_numbers(kset_); H_.U().mix(); H_.U().calculate_hubbard_potential_and_energy(); } eold = etot; } if (write_state) { ctx_.create_storage_file(); if (ctx_.full_potential()) { // TODO: why this is necessary? density_.rho().fft_transform(-1); for (int j = 0; j < ctx_.num_mag_dims(); j++) { density_.magnetization(j).fft_transform(-1); } } potential_.save(); density_.save(); } return result; } inline void DFT_ground_state::print_magnetic_moment() { mdarray<double, 2> mmom; if (!ctx_.full_potential() && ctx_.num_mag_dims()) { mmom = density_.compute_atomic_mag_mom(); } if (!ctx_.full_potential() && ctx_.num_mag_dims() && ctx_.comm().rank() == 0) { printf("Magnetic moments\n"); for (int i = 0; i < 80; i++) printf("-"); printf("\n"); printf("atom moment |moment|"); printf("\n"); for (int i = 0; i < 80; i++) printf("-"); printf("\n"); for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { vector3d<double> v({mmom(1, ia), mmom(2, ia), mmom(0, ia)}); printf("%4i [%8.4f, %8.4f, %8.4f] %10.6f\n", ia, v[0], v[1], v[2], v.length()); } } } inline void DFT_ground_state::print_info() { double evalsum1 = kset_.valence_eval_sum(); double evalsum2 = core_eval_sum(); double ekin = energy_kin(); double evxc = energy_vxc(); double eexc = energy_exc(); double ebxc = energy_bxc(); double evha = energy_vha(); double etot = total_energy(); double gap = kset_.band_gap() * ha2ev; double ef = kset_.energy_fermi(); double enuc = energy_enuc(); double one_elec_en = evalsum1 - (evxc + evha); if (ctx_.esm_type() == electronic_structure_method_t::pseudopotential) { one_elec_en -= potential_.PAW_one_elec_energy(); } std::vector<double> mt_charge; double it_charge; double total_charge = density_.rho().integrate(mt_charge, it_charge); double total_mag[3]; std::vector<double> mt_mag[3]; double it_mag[3]; for (int j = 0; j < ctx_.num_mag_dims(); j++) { total_mag[j] = density_.magnetization(j).integrate(mt_mag[j], it_mag[j]); } mdarray<double, 2> mmom; if (!ctx_.full_potential() && ctx_.num_mag_dims()) { mmom = compute_atomic_mag_mom(); } if (ctx_.comm().rank() == 0) { if (ctx_.full_potential()) { double total_core_leakage 
= 0.0;
            printf("\n");
            printf("Charges and magnetic moments\n");
            for (int i = 0; i < 80; i++) printf("-");
            printf("\n");
            printf("atom charge core leakage");
            if (ctx_.num_mag_dims()) printf(" moment |moment|");
            printf("\n");
            for (int i = 0; i < 80; i++) printf("-");
            printf("\n");
            for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
                double core_leakage = unit_cell_.atom(ia).symmetry_class().core_leakage();
                total_core_leakage += core_leakage;
                printf("%4i %10.6f %10.8e", ia, mt_charge[ia], core_leakage);
                if (ctx_.num_mag_dims()) {
                    vector3d<double> v;
                    v[2] = mt_mag[0][ia];
                    if (ctx_.num_mag_dims() == 3) {
                        v[0] = mt_mag[1][ia];
                        v[1] = mt_mag[2][ia];
                    }
                    printf(" [%8.4f, %8.4f, %8.4f] %10.6f", v[0], v[1], v[2], v.length());
                }
                printf("\n");
            }
            printf("\n");
            printf("total core leakage : %10.8e\n", total_core_leakage);
            printf("interstitial charge : %10.6f\n", it_charge);
            if (ctx_.num_mag_dims()) {
                vector3d<double> v;
                v[2] = it_mag[0];
                if (ctx_.num_mag_dims() == 3) {
                    v[0] = it_mag[1];
                    v[1] = it_mag[2];
                }
                printf("interstitial moment : [%8.4f, %8.4f, %8.4f], magnitude : %10.6f\n", v[0], v[1], v[2], v.length());
            }
        }
        printf("total charge : %10.6f\n", total_charge);
        if (ctx_.num_mag_dims()) {
            vector3d<double> v;
            v[2] = total_mag[0];
            if (ctx_.num_mag_dims() == 3) {
                v[0] = total_mag[1];
                v[1] = total_mag[2];
            }
            printf("total moment : [%8.4f, %8.4f, %8.4f], magnitude : %10.6f\n", v[0], v[1], v[2], v.length());
        }
        printf("\n");
        printf("Energy\n");
        for (int i = 0; i < 80; i++) printf("-");
        printf("\n");
        printf("valence_eval_sum : %18.8f\n", evalsum1);
        if (ctx_.full_potential()) {
            printf("core_eval_sum : %18.8f\n", evalsum2);
            printf("kinetic energy : %18.8f\n", ekin);
            printf("enuc : %18.8f\n", enuc);
        }
        printf("<rho|V^{XC}> : %18.8f\n", evxc);
        printf("<rho|E^{XC}> : %18.8f\n", eexc);
        printf("<mag|B^{XC}> : %18.8f\n", ebxc);
        printf("<rho|V^{H}> : %18.8f\n", evha);
        if (!ctx_.full_potential()) {
            printf("one-electron contribution : %18.8f (Ha), %18.8f (Ry)\n", one_elec_en, one_elec_en * 2); // eband + deband in QE
            printf("hartree contribution : %18.8f\n", 0.5 * evha);
            printf("xc contribution : %18.8f\n", eexc);
            printf("ewald contribution : %18.8f\n", ewald_energy_);
            printf("PAW contribution : %18.8f\n", potential_.PAW_total_energy());
        }
        if (ctx_.hubbard_correction()) {
            printf("Hubbard energy : %18.8f (Ha), %18.8f (Ry)\n", H_.U().hubbard_energy(), H_.U().hubbard_energy() * 2.0);
        }
        printf("Total energy : %18.8f (Ha), %18.8f (Ry)\n", etot, etot * 2);
        printf("\n");
        printf("band gap (eV) : %18.8f\n", gap);
        printf("Efermi : %18.8f\n", ef);
        printf("\n");
        //if (ctx_.control().verbosity_ >= 3 && !ctx_.full_potential()) {
        //    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        //        printf("atom: %i\n", ia);
        //        int nbf = unit_cell_.atom(ia).type().mt_basis_size();
        //        for (int j = 0; j < ctx_.num_mag_comp(); j++) {
        //            //printf("component of density matrix: %i\n", j);
        //            //for (int xi1 = 0; xi1 < nbf; xi1++) {
        //            //    for (int xi2 = 0; xi2 < nbf; xi2++) {
        //            //        auto z = density_.density_matrix()(xi1, xi2, j, ia);
        //            //        printf("(%f, %f) ", z.real(), z.imag());
        //            //    }
        //            //    printf("\n");
        //            //}
        //            printf("diagonal components of density matrix: %i\n", j);
        //            for (int xi2 = 0; xi2 < nbf; xi2++) {
        //                auto z = density_.density_matrix()(xi2, xi2, j, ia);
        //                printf("(%10.6f, %10.6f) ", z.real(), z.imag());
        //            }
        //            printf("\n");
        //        }
        //    }
        //}
    }
}

} // namespace

#endif // __DFT_GROUND_STATE_H__

/** \page DFT Spin-polarized DFT
 * \section section1 Preliminary notes
 *
 * \note Here and below symbol \f$ {\boldsymbol \sigma} \f$ is reserved for the vector
of Pauli matrices. Spin components * are labeled with \f$ \alpha \f$ or \f$ \beta \f$. * * Wave-function of spin-1/2 particle is a two-component spinor: * \f[ * {\bf \Psi}({\bf r}) = \left( \begin{array}{c} \psi^{\uparrow}({\bf r}) \\ \psi^{\downarrow}({\bf r}) \end{array} \right) * \f] * Operator of spin: * \f[ * {\bf \hat S}=\frac{\hbar}{2}{\boldsymbol \sigma}, * \f] * Pauli matrices: * \f[ * \sigma_x=\left( \begin{array}{cc} * 0 & 1 \\ * 1 & 0 \\ \end{array} \right) \, * \sigma_y=\left( \begin{array}{cc} * 0 & -i \\ * i & 0 \\ \end{array} \right) \, * \sigma_z=\left( \begin{array}{cc} * 1 & 0 \\ * 0 & -1 \\ \end{array} \right) * \f] * * Spin moment of an electron in quantum state \f$ | \Psi \rangle \f$: * \f[ * {\bf S}=\langle \Psi | {\bf \hat S} | \Psi \rangle = \frac{\hbar}{2} \langle \Psi | {\boldsymbol \sigma} | \Psi \rangle * \f] * * Spin magnetic moment of electron: * \f[ * {\bf \mu}_e=\gamma_e {\bf S}, * \f] * where \f$ \gamma_e \f$ is the gyromagnetic ratio for the electron. * \f[ * \gamma_e=-\frac{g_e \mu_B}{\hbar} \;\;\; \mu_B=\frac{e\hbar}{2m_ec} * \f] * Here \f$ g_e \f$ is a g-factor for electron which is ~2, and \f$ \mu_B \f$ - Bohr magneton (defined as positive constant). * Finally, magnetic moment of electron: * \f[ * {\bf \mu}_e=-{\bf \mu}_B \langle \Psi | {\boldsymbol \sigma} | \Psi \rangle * \f] * Potential energy of magnetic dipole in magnetic field: * \f[ * U=-{\bf B}{\bf \mu}={\bf \mu}_B {\bf B} \langle \Psi | {\boldsymbol \sigma} | \Psi \rangle * \f] * * \section section2 Density and magnetization * In magnetic calculations we have charge density \f$ \rho({\bf r}) \f$ (scalar function) and magnetization density * \f$ {\bf m}({\bf r}) \f$ (vector function). Density is defined as: * \f[ * \rho({\bf r}) = \sum_{j}^{occ} \Psi_{j}^{*}({\bf r}){\bf I} \Psi_{j}({\bf r}) = * \sum_{j}^{occ} \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) + * \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) * \f] * Magnetization is defined as: * \f[ * {\bf m}({\bf r}) = \sum_{j}^{occ} \Psi_{j}^{*}({\bf r}) {\boldsymbol \sigma} \Psi_{j}({\bf r}) * \f] * \f[ * m_x({\bf r}) = \sum_{j}^{occ} \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) + * \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) * \f] * \f[ * m_y({\bf r}) = \sum_{j}^{occ} -i \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) + * i \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) * \f] * \f[ * m_z({\bf r}) = \sum_{j}^{occ} \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) - * \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) * \f] * Density and magnetization can be grouped into a \f$ 2 \times 2 \f$ density matrix, which is defined as: * \f[ * {\boldsymbol \rho}({\bf r}) = \frac{1}{2} \Big( {\bf I}\rho({\bf r}) + {\boldsymbol \sigma} {\bf m}({\bf r})\Big) = * \frac{1}{2} \left( \begin{array}{cc} \rho({\bf r}) + m_z({\bf r}) & m_x({\bf r}) - i m_y({\bf r}) \\ * m_x({\bf r}) + i m_y({\bf r}) & \rho({\bf r}) - m_z({\bf r}) \end{array} \right) = * \sum_{j}^{occ} \left( \begin{array}{cc} \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) & * \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\uparrow}({\bf r}) \\ * \psi_{j}^{\uparrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) & * \psi_{j}^{\downarrow *}({\bf r}) \psi_{j}^{\downarrow}({\bf r}) \end{array} \right) * \f] * or simply * \f[ * \rho_{\alpha \beta}({\bf r}) = \sum_{j}^{occ} \psi_{j}^{\beta *}({\bf r})\psi_{j}^{\alpha}({\bf r}) * \f] * Pay attention to the order of spin indices in 
 * the \f$ 2 \times 2 \f$ density matrix.
 *
 * External potential \f$ v^{ext}({\bf r}) \f$ and external magnetic field \f$ {\bf B}^{ext}({\bf r}) \f$ can
 * also be grouped into a \f$ 2 \times 2 \f$ matrix:
 * \f[
 * V_{\alpha\beta}^{ext}({\bf r})=\Big({\bf I}v^{ext}({\bf r})+\mu_{B}{\boldsymbol \sigma}{\bf B}^{ext}({\bf r}) \Big) =
 * \left( \begin{array}{cc} v^{ext}({\bf r})+\mu_{B}B_z^{ext}({\bf r}) & \mu_{B} \Big( B_x^{ext}({\bf r})-iB_y^{ext}({\bf r}) \Big) \\
 * \mu_{B} \Big( B_x^{ext}({\bf r})+iB_y^{ext}({\bf r}) \Big) & v^{ext}({\bf r})-\mu_{B}B_z^{ext}({\bf r}) \end{array} \right)
 * \f]
 * Let's check that potential energy in external fields can be written in the following way:
 * \f[
 * E^{ext}=\int Tr \Big( {\boldsymbol \rho}({\bf r}) {\bf V}^{ext}({\bf r}) \Big) d{\bf r} =
 * \sum_{\alpha\beta} \int \rho_{\alpha\beta}({\bf r})V_{\beta\alpha}^{ext}({\bf r}) d{\bf r}
 * \f]
 * (below \f$ {\bf r} \f$, \f$ ext \f$ and \f$ \int d{\bf r} \f$ are dropped for simplicity)
 * \f[
 * \begin{eqnarray}
 * \rho_{11}V_{11} &= \frac{1}{2}(\rho+m_z)(v+\mu_{B}B_z) = \frac{1}{2}(\rho v +\mu_{B} \rho B_z + m_z v + \mu_{B}m_zB_z) \\
 * \rho_{22}V_{22} &= \frac{1}{2}(\rho-m_z)(v-\mu_{B}B_z) = \frac{1}{2}(\rho v -\mu_{B} \rho B_z - m_z v + \mu_{B}m_zB_z) \\
 * \rho_{12}V_{21} &= \frac{1}{2}(m_x-im_y)\Big( \mu_{B}( B_x+iB_y) \Big) = \frac{\mu_B}{2}(m_xB_x+im_xB_y-im_yB_x+m_yB_y) \\
 * \rho_{21}V_{12} &= \frac{1}{2}(m_x+im_y)\Big( \mu_{B}( B_x-iB_y) \Big) = \frac{\mu_B}{2}(m_xB_x-im_xB_y+im_yB_x+m_yB_y)
 * \end{eqnarray}
 * \f]
 * The sum of these four terms gives exactly \f$ \int \rho({\bf r}) v^{ext}({\bf r}) +
 * \mu_{B}{\bf m}({\bf r}) {\bf B}^{ext}({\bf r}) d{\bf r}\f$
 *
 * \section section3 Total energy variation
 *
 * To derive Kohn-Sham equations we need to write total energy functional of density matrix \f$ \rho_{\alpha\beta}({\bf r}) \f$:
 * \f[
 * E^{tot}[\rho_{\alpha\beta}] = E^{kin}[\rho_{\alpha\beta}] + E^{H}[\rho_{\alpha\beta}] + E^{ext}[\rho_{\alpha\beta}] +
 * E^{XC}[\rho_{\alpha\beta}]
 * \f]
 * Kinetic energy of non-interacting electrons is written in the following way:
 * \f[
 * E^{kin}[\rho_{\alpha\beta}] \equiv E^{kin}[\Psi[\rho_{\alpha\beta}]] =
 * -\frac{1}{2} \sum_{i}^{occ}\sum_{\alpha} \int \psi_{i}^{\alpha*}({\bf r}) \nabla^{2} \psi_{i}^{\alpha}({\bf r})d^3{\bf r}
 * \f]
 * Hartree energy:
 * \f[
 * E^{H}[\rho_{\alpha\beta}]= \frac{1}{2} \iint \frac{\rho({\bf r})\rho({\bf r'})}{|{\bf r}-{\bf r'}|} d{\bf r} d{\bf r'} =
 * \frac{1}{2} \iint \sum_{\alpha\beta}\delta_{\alpha\beta} \frac{\rho_{\alpha\beta}({\bf r}) \rho({\bf r'})}{|{\bf r}-{\bf r'}|} d{\bf r} d{\bf r'}
 * \f]
 * where \f$ \rho({\bf r}) = Tr \rho_{\alpha\beta}({\bf r}) \f$.
 *
 * Exchange-correlation energy:
 * \f[
 * E^{XC}[\rho_{\alpha\beta}({\bf r})] \equiv E^{XC}[\rho({\bf r}),|{\bf m}({\bf r})|] =
 * \int \rho({\bf r}) \eta_{XC}(\rho({\bf r}), m({\bf r})) d{\bf r} =
 * \int \rho({\bf r}) \eta_{XC}(\rho^{\uparrow}({\bf r}), \rho^{\downarrow}({\bf r})) d{\bf r}
 * \f]
 * Now we can write the total energy variation over auxiliary orbitals with the constraint of orbital normalization:
 * \f[
 * \frac{\delta \Big( E^{tot}+\varepsilon_i \big( 1-\sum_{\alpha} \int \psi^{\alpha*}_{i}({\bf r})\psi^{\alpha}_{i}({\bf r})d{\bf r} \big) \Big)}
 * {\delta \psi_{i}^{\gamma*}({\bf r})} = 0
 * \f]
 * We will use the following chain rule:
 * \f[
 * \frac{\delta F[\rho_{\alpha\beta}]}{\delta \psi_{i}^{\gamma *}({\bf r})} =
 * \sum_{\alpha' \beta'} \frac{\delta F[\rho_{\alpha\beta}]}{\delta \rho_{\alpha'\beta'}({\bf r})}
 * \frac{\delta \rho_{\alpha'\beta'}({\bf r})}{\delta \psi_{i}^{\gamma *}({\bf r})} =
 * \sum_{\alpha'\beta'}\frac{\delta F[\rho_{\alpha\beta}]}{\delta \rho_{\alpha'\beta'}({\bf r})}
 * \psi_{i}^{\alpha'}({\bf r}) \delta_{\beta'\gamma} =
 * \sum_{\alpha'}\frac{\delta F[\rho_{\alpha\beta}]}{\delta \rho_{\alpha'\gamma}({\bf r})}\psi_{i}^{\alpha'}({\bf r})
 * \f]
 * Variation of the normalization integral:
 * \f[
 * \frac{\delta \sum_{\alpha} \int \psi_{i}^{\alpha*}({\bf r}) \psi_{i}^{\alpha}({\bf r}) d {\bf r} }{\delta \psi_{i}^{\gamma *}({\bf r})} =
 * \psi_{i}^{\gamma}({\bf r})
 * \f]
 * Variation of the kinetic energy functional:
 * \f[
 * \frac{\delta E^{kin}}{\delta \psi_{i}^{\gamma*}({\bf r})} = -\frac{1}{2} \sum_{\alpha} \nabla^{2} \psi_{i}^{\alpha}({\bf r})
 * \delta_{\alpha\gamma} = -\frac{1}{2}\nabla^{2}\psi_{i}^{\gamma}({\bf r})
 * \f]
 * Variation of the Hartree energy functional:
 * \f[
 * \frac{\delta E^{H}[\rho_{\alpha\beta}]}{\delta \psi_{i}^{\gamma *}({\bf r})} =
 * \sum_{\alpha'} \sum_{\alpha\beta} \delta_{\alpha\beta} \frac{1}{2} \int \frac{ \rho({\bf r'})}{|{\bf r}-{\bf r'}|} d{\bf r'}
 * \delta_{\alpha\alpha'}\delta_{\beta\gamma} \psi_{i}^{\alpha'}({\bf r}) = v^{H}({\bf r}) \psi_{i}^{\gamma}({\bf r})
 * \f]
 * Variation of the external energy functional:
 * \f[
 * \frac{\delta E^{ext}[\rho_{\alpha\beta}]}{\delta \psi_{i}^{\gamma*}({\bf r}) } =
 * \sum_{\alpha'} \sum_{\alpha\beta} V_{\beta\alpha}^{ext}({\bf r}) \delta_{\alpha\alpha'} \delta_{\beta\gamma} \psi_{i}^{\alpha'}({\bf r})=
 * \sum_{\alpha} V_{\gamma\alpha}^{ext}({\bf r}) \psi_{i}^{\alpha}({\bf r})
 * \f]
 *
 * Variation of the exchange-correlation functional:
 * \f[
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta \psi_{i}^{\gamma*}({\bf r}) } =
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta \rho({\bf r})} \frac{\delta \rho({\bf r})}{\delta \psi_{i}^{\gamma*}({\bf r})} +
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta m({\bf r})} \sum_{p=x,y,z} \frac{\delta m({\bf r})}{ \delta m_p({\bf r})}
 * \frac{\delta m_p({\bf r})}{\delta \psi_{i}^{\gamma*}({\bf r})}
 * \f]
 * where \f$ m({\bf r}) = |{\bf m}({\bf r})|\f$ is the length of the magnetization vector.
 *
 * First term:
 * \f[
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta \rho({\bf r})} \frac{\delta \rho({\bf r})}{\delta \psi_{i}^{\gamma*}({\bf r})} =
 * v^{XC}({\bf r}) \psi_{i}^{\gamma}({\bf r})
 * \f]
 * Second term:
 * \f[
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta m({\bf r})} \sum_{p=x,y,z} \frac{\delta m({\bf r})}{ \delta m_p({\bf r})}
 * \frac{\delta m_p({\bf r})}{\delta \psi_{i}^{\gamma*}({\bf r})} =
 * B^{XC}({\bf r}) \hat {\bf m} \sum_{\beta} {\boldsymbol \sigma}_{\gamma \beta} \psi_{i}^{\beta}({\bf r})
 * \f]
 * where \f$ B^{XC}({\bf r}) = \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{ \delta m({\bf r})} \f$,
 * \f$ \hat {\bf m}({\bf r}) = \frac{\delta m({\bf r})}{ \delta {\bf m}({\bf r})} \f$ is the unit vector,
 * parallel to \f$ {\bf m}({\bf r}) \f$
 * and \f$ {\bf m}({\bf r}) = \sum_{i} \sum_{\alpha \beta} \psi_{i}^{\alpha*}({\bf r}) {\boldsymbol \sigma}_{\alpha \beta} \psi_i^{\beta}({\bf r}) \f$
 *
 * Similarly to the external potential, the exchange-correlation potential can be grouped into a \f$ 2 \times 2 \f$ matrix:
 * \f[
 * \frac{\delta E^{XC}[\rho_{\alpha\beta}]}{\delta \rho_{\alpha'\beta'}({\bf r})} \equiv V^{XC}_{\beta'\alpha'}({\bf r}) =
 * \Big( {\bf I}v^{XC}({\bf r}) + {\bf B}^{XC}({\bf r}) {\boldsymbol \sigma} \Big)_{\beta'\alpha'}
 * \f]
 * where \f${\bf B}^{XC}({\bf r}) = \hat {\bf m}({\bf r})B^{XC}({\bf r}) \f$ -- exchange-correlation magnetic field,
 * parallel to \f$ {\bf m}({\bf r}) \f$ at each point in space. We can now collect \f$ v^{H}({\bf r}) \f$,
 * \f$ V_{\alpha\beta}^{ext}({\bf r}) \f$ and \f$V_{\alpha\beta}^{XC}({\bf r}) \f$ into one effective potential:
 * \f[
 * V^{eff}_{\alpha\beta}({\bf r}) = v^{H}({\bf r})\delta_{\alpha\beta} + V_{\alpha\beta}^{ext}({\bf r}) +
 * V_{\alpha\beta}^{XC}({\bf r}) =
 * \Big({\bf I}\big(v^{H}({\bf r})+v^{ext}({\bf r})+v^{XC}({\bf r})\big) +
 * {\boldsymbol \sigma}\big( \mu_{B}{\bf B}^{ext}({\bf r}) + {\bf B}^{XC}({\bf r})\big)\Big)_{\alpha\beta}
 * \f]
 * and finally, we arrive at the following Kohn-Sham equation for each component \f$ \gamma \f$ of spinor wave-functions:
 * \f[
 * -\frac{1}{2}\nabla^{2}\psi_{i}^{\gamma}({\bf r}) + \sum_{\alpha} V_{\gamma\alpha}^{eff}({\bf r}) \psi_{i}^{\alpha}({\bf r}) =
 * \varepsilon_i \psi_{i}^{\gamma}({\bf r})
 * \f]
 * or in matrix form
 * \f[
 * \left( \begin{array}{cc} -\frac{1}{2}\nabla^2+V^{eff}_{\uparrow \uparrow} & V^{eff}_{\uparrow \downarrow} \\
 * V^{eff}_{\downarrow \uparrow} & -\frac{1}{2}\nabla^2+V^{eff}_{\downarrow \downarrow} \end{array}\right)
 * \left(\begin{array}{c} \psi_{i}^{\uparrow}({\bf r}) \\ \psi_{i}^{\downarrow} ({\bf r}) \end{array} \right) = \varepsilon_i
 * \left(\begin{array}{c} \psi_{i}^{\uparrow}({\bf r}) \\ \psi_{i}^{\downarrow}({\bf r}) \end{array} \right)
 * \f]
 *
 * \section section4 Second-variational approach
 *
 * Suppose that we know the first \f$ N_{fv} \f$ solutions of the following equation (the so-called first variational equation):
 * \f[
 * \Big(-\frac{1}{2}\nabla^2+v^{H}({\bf r})+v^{ext}({\bf r})+v^{XC}({\bf r}) \Big)\phi_{i}({\bf r}) = \epsilon_i \phi_{i}({\bf r})
 * \f]
 * We can write expansion of the components of spinor wave-functions \f$ \psi^{\alpha}_j({\bf r}) \f$ in terms of
 * first-variational states \f$ \phi_i({\bf r}) \f$:
 * \f[
 * \psi_{j}^{\alpha}({\bf r}) = \sum_{i}^{N_{fv}}C_{ij}^{\alpha}\phi_{i}({\bf r})
 * \f]
 * Next, we switch to the matrix equation:
 * \f[
 * \langle \Psi_{j'}| \hat H | \Psi_{j} \rangle = \varepsilon_j \delta_{j'j} \\
 * \sum_{\alpha \alpha'} \sum_{ii'} C_{i'j'}^{\alpha'*} C_{ij}^{\alpha} \langle \phi_{i'} | \hat H_{\alpha' \alpha} | \phi_{i} \rangle =
 * \sum_{\alpha \alpha'} \sum_{ii'} C_{i'j'}^{\alpha'*} C_{ij}^{\alpha} H_{\alpha'i', \alpha i} = \varepsilon_j \delta_{j'j}
 * \f]
 *
 * We can combine indices \f$ \{i,\alpha\} \f$ into one 'flat' index \f$ \nu \f$. If we also assume that the number of
 * spinor wave-functions is equal to \f$ 2 N_{fv} \f$ then we arrive at the well-known eigen-decomposition:
 * \f[
 * \sum_{\nu'\nu} C_{\nu' j'}^{*} H_{\nu'\nu} C_{\nu j} = \varepsilon_j \delta_{j'j}
 * \f]
 *
 * The expression for the second-variational Hamiltonian is simple:
 * \f[
 * \langle \phi_{i'}|\hat H_{\alpha'\alpha} |\phi_{i} \rangle =
 * \langle \phi_{i'} | \Big(-\frac{1}{2}\nabla^2 + v^{H}({\bf r}) + v^{ext}({\bf r}) + v^{XC}({\bf r}) \Big) \delta_{\alpha\alpha'}|\phi_{i}\rangle +
 * \langle \phi_{i'} | {\boldsymbol \sigma}_{\alpha\alpha'} \Big( \mu_{B}{\bf B}^{ext}({\bf r})+{\bf B}^{XC}({\bf r})\Big) | \phi_{i}\rangle =\\
 * \epsilon_{i}\delta_{i'i}\delta_{\alpha\alpha'} + {\boldsymbol \sigma}_{\alpha\alpha'} \langle \phi_{i'} | \Big( \mu_{B}{\bf B}^{ext}({\bf r}) +
 * {\bf B}^{XC}({\bf r})\Big) | \phi_{i}\rangle
 * \f]
 */
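/* A standalone numerical check (not part of SIRIUS) of the identity
   Tr(rho V^ext) = rho v + mu_B (m . B) used in section2 of the page above,
   written out with explicit real and imaginary parts of the 2x2 matrices.
   All names and sample values are illustrative.

    #include <stdio.h>

    int main(void)
    {
        // arbitrary sample values at one point in space
        double rho = 1.3, mx = 0.2, my = -0.4, mz = 0.7;
        double v = -2.1, Bx = 0.5, By = 0.1, Bz = -0.3;
        double muB = 1.0; // units chosen so that mu_B = 1 for the check

        // rho11*V11 + rho22*V22
        double diag = 0.5 * (rho + mz) * (v + muB * Bz)
                    + 0.5 * (rho - mz) * (v - muB * Bz);
        // rho12*V21 + rho21*V12; the imaginary parts cancel pairwise
        double offd = muB * (mx * Bx + my * By);

        double trace = diag + offd;
        double expected = rho * v + muB * (mx * Bx + my * By + mz * Bz);
        printf("Tr(rho V) = %.12f, rho v + muB m.B = %.12f\n", trace, expected);
        return 0;
    }
*/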
SPGrid_Threading_Helper.h
//!##################################################################### //! \file SPGrid_Threading_Helper.h //!##################################################################### // Class SPGrid_Threading_Helper //###################################################################### #ifndef __SPGrid_Threading_Helper__ #define __SPGrid_Threading_Helper__ #include <nova/SPGrid/Core/SPGrid_Allocator.h> #include <nova/Tools/Utilities/Pthread_Queue.h> #include <vector> extern Pthread_Queue* pthread_queue; namespace SPGrid_Computations{ using namespace SPGrid; template<class Struct_type,int d,class T_Operation> struct Threading_Operation_Helper: public Pthread_Queue::Task { typedef std::pair<const unsigned long*,unsigned> T_Block; SPGrid_Allocator<Struct_type,d>& allocator; const T_Block blocks; const T_Operation& operation; Threading_Operation_Helper(SPGrid_Allocator<Struct_type,d>& allocator_input,const T_Block& blocks_input,const T_Operation& operation_input) :allocator(allocator_input),blocks(blocks_input),operation(operation_input) {} void Run(){operation.Run(allocator,blocks);} }; template<class Struct_type,int d> class Threading_Helper { typedef std::pair<const unsigned long*,unsigned> T_Block; SPGrid_Allocator<Struct_type,d>& allocator; const T_Block& blocks; public: Threading_Helper(SPGrid_Allocator<Struct_type,d>& allocator_input,const T_Block& blocks_input) :allocator(allocator_input),blocks(blocks_input) {} template<class T_Operation> void Run_Parallel(const T_Operation& operation,std::vector<T_Block> list_of_partitions) {for(int partition=0;partition<list_of_partitions.size();partition++){ Threading_Operation_Helper<Struct_type,d,T_Operation>* task= new Threading_Operation_Helper<Struct_type,d,T_Operation>(allocator,list_of_partitions[partition],operation); pthread_queue->Queue(task);} pthread_queue->Wait();} template<class T_Operation> void Run_Parallel(const T_Operation& operation,const int number_of_partitions) {const unsigned long* block_offsets=blocks.first; const int size=blocks.second; if(size<number_of_partitions*16){operation.Run(allocator,blocks);return;} for(int partition=0;partition<number_of_partitions;partition++){ int first_index_of_partition=(size/number_of_partitions)*(partition)+std::min(size%number_of_partitions,partition); int block_size=(size/number_of_partitions)+((partition<size%number_of_partitions)?1:0); T_Block block(block_offsets+first_index_of_partition,block_size); Threading_Operation_Helper<Struct_type,d,T_Operation>* task= new Threading_Operation_Helper<Struct_type,d,T_Operation>(allocator,block,operation); pthread_queue->Queue(task);} pthread_queue->Wait();} }; template<class Functor> void Run_Parallel_Blocks(const std::pair<const uint64_t*,uint32_t>& blocks,Functor functor) { #pragma omp parallel for for(int b=0;b<blocks.second;b++) functor(blocks.first[b]); } } #endif
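/* A standalone sketch (not part of SPGrid) of the partitioning arithmetic used by
   Run_Parallel above: the first (size % P) partitions receive one extra block, and
   the resulting chunks are contiguous and tile the whole [0, size) range. Names
   and the sample sizes are illustrative.

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int const size = 103, P = 8; // e.g. 103 blocks over 8 partitions
        int covered = 0;
        for (int partition = 0; partition < P; partition++) {
            int first = (size / P) * partition
                      + (size % P < partition ? size % P : partition); // min(size % P, partition)
            int count = size / P + (partition < size % P ? 1 : 0);
            assert(first == covered); // chunks are contiguous
            covered += count;
            printf("partition %d: [%d, %d)\n", partition, first, first + count);
        }
        assert(covered == size); // chunks tile the whole range
        return 0;
    }
*/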
psd.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                            PPPP   SSSSS  DDDD                               %
%                            P   P  SS     D   D                              %
%                            PPPP    SSS   D   D                              %
%                            P         SS  D   D                              %
%                            P      SSSSS  DDDD                               %
%                                                                             %
%                                                                             %
%                   Read/Write Adobe Photoshop Image Format                   %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                              Leonard Rosenthol                              %
%                                 July 1992                                   %
%                                 Dirk Lemstra                                %
%                                December 2013                                %
%                                                                             %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels  56
#define PSDQuantum(x)  (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;

typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;
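/* The RLE decoder later in this file (DecodePSDPixels) expands PSD's PackBits-style
   compression. As a reference for the byte-level scheme only (the sub-byte depth
   expansion is omitted), a minimal standalone decoder is sketched below; the
   function name and buffer handling are illustrative, not part of this coder.

    static size_t PackBitsDecode(const unsigned char *src,size_t src_len,
      unsigned char *dst,size_t dst_len)
    {
      size_t
        i = 0,
        o = 0;

      while ((i < src_len) && (o < dst_len))
      {
        unsigned char n = src[i++];
        if (n == 128)
          continue;  // no-op control byte
        if (n > 128)
          {
            // run: repeat the next byte 257-n times
            size_t count = 257-(size_t) n;
            if (i >= src_len)
              break;
            unsigned char value = src[i++];
            while ((count-- != 0) && (o < dst_len))
              dst[o++]=value;
          }
        else
          {
            // literal: copy the next n+1 bytes verbatim
            size_t count = (size_t) n+1;
            while ((count-- != 0) && (i < src_len) && (o < dst_len))
              dst[o++]=src[i++];
          }
      }
      return(o);
    }
*/

/*
  Forward declarations.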
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ?
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if 
(complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length,ExceptionInfo *exception) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char 
name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char 
  *p,size_t length)
{
  char *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for (--q; p < q; ++p, --q) {
    *p=*p ^ *q;
    *q=*p ^ *q;
    *p=*p ^ *q;
  }
}

static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass) {
    PixelInfo *color;
    Quantum index;

    index=pixel;
    if (packet_size == 1)
      index=(Quantum) ScaleQuantumToChar(index);
    index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,exception);
    if (type == 0)
      SetPixelIndex(image,index,q);
    if ((type == 0) && (channels > 1))
      return;
    color=image->colormap+(ssize_t) GetPixelIndex(image,q);
    if (type != 0)
      color->alpha=(MagickRealType) pixel;
    SetPixelViaPixelInfo(image,color,q);
    return;
  }
  switch (type) {
    case -1: {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0: {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1: {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2: {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3: {
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4: {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}

static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum pixel;
  register const unsigned char *p;
  register Quantum *q;
  register ssize_t x;
  size_t packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++) {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2) {
        unsigned short nibble;

        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
      else {
        MagickFloatType nibble;

        p=PushFloatPixel(MSBEndian,p,&nibble);
        pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
      }
    if (image->depth > 1) {
      SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
      q+=GetPixelChannels(image);
    }
    else {
      ssize_t bit, number_bits;

      number_bits=(ssize_t) image->columns-x;
      if (number_bits > 8)
        number_bits=8;
      for (bit=0; bit < (ssize_t) number_bits; bit++) {
        SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) &
          (0x01 << (7-bit))) != 0 ?
          0 : QuantumRange,q,exception);
        q+=GetPixelChannels(image);
        x++;
      }
      if (x != (ssize_t) image->columns)
        x--;
      continue;
    }
  }
  return(SyncAuthenticPixels(image,exception));
}

static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType status;
  size_t row_size;
  ssize_t count, y;
  unsigned char *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++) {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType *sizes;
  ssize_t y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes != (MagickOffsetType *) NULL) {
    for (y=0; y < (ssize_t) size; y++) {
      if (psd_info->version == 1)
        sizes[y]=(MagickOffsetType) ReadBlobShort(image);
      else
        sizes[y]=(MagickOffsetType) ReadBlobLong(image);
    }
  }
  return(sizes);
}

static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType status;
  size_t length, row_size;
  ssize_t count, y;
  unsigned char *compact_pixels, *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) { /* arbitrary number */
    pixels=(unsigned char *) RelinquishMagickMemory(pixels);
    ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
  }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL) {
    pixels=(unsigned char *) RelinquishMagickMemory(pixels);
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++) {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ?
        123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType status;
  register unsigned char *p;
  size_t count, length, packet_size, row_size;
  ssize_t y;
  unsigned char *compact_pixels, *pixels;
  z_stream stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL) {
    compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) {
    pixels=(unsigned char *) RelinquishMagickMemory(pixels);
    compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *) compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *) pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK) {
    int ret;

    while (stream.avail_out > 0) {
      ret=inflate(&stream,Z_SYNC_FLUSH);
      if ((ret != Z_OK) && (ret != Z_STREAM_END)) {
        (void) inflateEnd(&stream);
        compact_pixels=(unsigned char *) RelinquishMagickMemory(
          compact_pixels);
        pixels=(unsigned char *) RelinquishMagickMemory(pixels);
        return(MagickFalse);
      }
      if (ret == Z_STREAM_END)
        break;
    }
    (void) inflateEnd(&stream);
  }
  if (compression == ZipWithPrediction) {
    p=pixels;
    while (count > 0) {
      length=image->columns;
      while (--length) {
        if (packet_size == 2) {
          p[2]+=p[0]+((p[1]+p[3]) >> 8);
          p[3]+=p[1];
        }
        /*
          else if (packet_size == 4)
            {
              TODO: Figure out what to do there.
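              Untested sketch, based on Adobe's file-format notes: a
              32-bit ZIP-with-prediction row is delta encoded per byte
              after the four bytes of each big-endian float are split
              into planes, so a decoder would first undo the byte delta
              across the whole row, e.g.

                for (x=1; x < (ssize_t) row_size; x++)
                  p[x]+=p[x-1];

              and then re-interleave the four byte planes back into
              big-endian floats before handing the row to
              ReadPSDChannelPixels().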
            }
        */
        else
          *(p+1)+=*p;
        p+=packet_size;
      }
      p+=packet_size;
      count-=row_size;
    }
  }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++) {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo *layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image *channel_image, *mask;
  MagickOffsetType offset;
  MagickBooleanType status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) {
    const char *option;

    /*
      Ignore mask that is not a user supplied layer mask, if the mask is
      disabled or if the flags have unsupported values.
    */
    option=GetImageOption(image_info,"psd:preserve-opacity-mask");
    if ((layer_info->channel_info[channel].type != -2) ||
        (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
         (IsStringTrue(option) == MagickFalse))) {
      (void) SeekBlob(image,(MagickOffsetType)
        layer_info->channel_info[channel].size-2,SEEK_CUR);
      return(MagickTrue);
    }
    mask=CloneImage(image,layer_info->mask.page.width,
      layer_info->mask.page.height,MagickFalse,exception);
    if (mask != (Image *) NULL) {
      (void) ResetImagePixels(mask,exception);
      (void) SetImageType(mask,GrayscaleType,exception);
      channel_image=mask;
    }
  }
  offset=TellBlob(image);
  status=MagickFalse;
  switch (compression) {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE: {
      MagickOffsetType *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
      break;
    }
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse) {
    if (mask != (Image *) NULL)
      (void) DestroyImage(mask);
    ThrowBinaryException(CoderError,"UnableToDecompressImage",
      image->filename);
  }
  if (mask != (Image *) NULL) {
    if (layer_info->mask.image != (Image *) NULL)
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    layer_info->mask.image=mask;
  }
  return(status);
}

static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo *layer_info,ExceptionInfo *exception)
{
  char message[MagickPathExtent];
  MagickBooleanType status;
  PSDCompressionType compression;
  ssize_t j;

  if (image->debug != MagickFalse)
    (void)
      LogMagickEvent(CoderEvent,GetMagickModule(),
        " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++) {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32)) {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
      return(MagickFalse);
    }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) {
    const char *option;

    layer_info->mask.image->page.x=layer_info->mask.page.x;
    layer_info->mask.image->page.y=layer_info->mask.page.y;
    /* Do not composite the mask when it is disabled */
    if ((layer_info->mask.flags & 0x02) == 0x02)
      layer_info->mask.image->compose=NoCompositeOp;
    else
      status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
        layer_info->mask.background == 0 ?
        0 : QuantumRange,MagickFalse,exception);
    option=GetImageOption(image_info,"psd:preserve-opacity-mask");
    if (IsStringTrue(option) != MagickFalse)
      PreservePSDOpacityMask(image,layer_info,exception);
    layer_info->mask.image=DestroyImage(layer_info->mask.image);
  }
  return(status);
}

static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int channel_type;
  register ssize_t i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++) {
    short type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1) {
      channel_type|=AlphaChannel;
      continue;
    }
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels+1))
    return(MagickTrue);
  return(MagickFalse);
}

static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t i;
  ssize_t j;

  for (i=0; i < number_layers; i++) {
    if (layer_info[i].image == (Image *) NULL) {
      for (j=i; j < number_layers-1; j++)
        layer_info[j]=layer_info[j+1];
      number_layers--;
      i--;
    }
  }
  if (number_layers == 0) {
    layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
    return;
  }
  for (i=0; i < number_layers; i++) {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}

static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if (psd_info->has_merged_image == MagickFalse)
    return(MagickFalse);
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if (index < image_info->scene)
    return(MagickTrue);
  if (index > image_info->scene+image_info->number_scenes-1)
    return(MagickTrue);
  return(MagickFalse);
}

static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char type[4];
  LayerInfo *layer_info;
  MagickSizeType size;
  MagickBooleanType status;
  register ssize_t i;
  ssize_t count, index, j, number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0) {
    /*
      Skip layers & masks.
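      Even when the declared section length is zero, the file may still
      carry its layers in a global "Lr16"/"Lr32" additional-information
      block (used by 16- and 32-bit files), so probe for an "8BIM"
      signature before giving up.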
    */
    (void) ReadBlobLong(image);
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,(size_t) count);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      return(MagickTrue);
    else {
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,4);
      if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
          (LocaleNCompare(type,"Lr32",4) == 0)))
        size=GetPSDSize(psd_info,image);
      else
        return(MagickTrue);
    }
  }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0) {
    /*
      The first alpha channel in the merged result contains the
      transparency data for the merged result.
    */
    number_layers=MagickAbsoluteValue(number_layers);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " negative layer count corrected for");
    image->alpha_trait=BlendPixelTrait;
  }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL) {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " allocation of LayerInfo failed");
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  for (i=0; i < number_layers; i++) {
    ssize_t top, left, bottom, right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top)) {
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
        image->filename);
    }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels) {
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
        image->filename);
    }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++) {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4)) {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
          image->filename);
      }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) {
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
        image->filename);
    }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " layer type was %.4s instead of 8BIM",type);
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
        image->filename);
    }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4) {
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
        image->filename);
    }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image);  /* filler */
    size=ReadBlobLong(image);
    if (size != 0) {
      MagickSizeType combined_length, length;

      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " layer contains additional info");
      length=ReadBlobLong(image);
      combined_length=length+4;
      if (length != 0) {
        /*
          Layer mask info.
        */
        layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
        layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
        layer_info[i].mask.page.height=(size_t)
          (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
        layer_info[i].mask.page.width=(size_t) (
          ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
        layer_info[i].mask.background=(unsigned char) ReadBlobByte(
          image);
        layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
        if (!(layer_info[i].mask.flags & 0x01)) {
          layer_info[i].mask.page.y=layer_info[i].mask.page.y-
            layer_info[i].page.y;
          layer_info[i].mask.page.x=layer_info[i].mask.page.x-
            layer_info[i].page.x;
        }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
            (double) layer_info[i].mask.page.x,(double)
            layer_info[i].mask.page.y,(double)
            layer_info[i].mask.page.width,(double)
            layer_info[i].mask.page.height,(double)
            ((MagickOffsetType) length)-18);
        /*
          Skip over the rest of the layer mask information.
        */
        if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) ==
            MagickFalse) {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,
            "UnexpectedEndOfFile",image->filename);
        }
      }
      length=ReadBlobLong(image);
      combined_length+=length+4;
      if (length != 0) {
        /*
          Layer blending ranges info.
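          Per Adobe's format notes this block holds a composite gray
          blend range followed by source/destination ranges for each
          channel; nothing downstream uses them, so the bytes are simply
          discarded below.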
        */
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer blending ranges: length=%.20g",(double)
            ((MagickOffsetType) length));
        if (DiscardBlobBytes(image,length) == MagickFalse) {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,
            "UnexpectedEndOfFile",image->filename);
        }
      }
      /*
        Layer name.
      */
      length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
      combined_length+=length+1;
      if (length > 0)
        (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
      layer_info[i].name[length]='\0';
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " layer name: %s",layer_info[i].name);
      if ((length % 4) != 0) {
        length=4-(length % 4);
        combined_length+=length;
        /*
          Skip over the padding of the layer name
        */
        if (DiscardBlobBytes(image,length) == MagickFalse) {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,
            "UnexpectedEndOfFile",image->filename);
        }
      }
      length=(MagickSizeType) size-combined_length;
      if (length > 0) {
        unsigned char *info;

        if (length > GetBlobSize(image)) {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,
            "InsufficientImageDataInFile",image->filename);
        }
        layer_info[i].info=AcquireStringInfo((size_t) length);
        info=GetStringInfoDatum(layer_info[i].info);
        (void) ReadBlob(image,(size_t) length,info);
      }
    }
  }
  for (i=0; i < number_layers; i++) {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " layer data is empty");
      if (layer_info[i].info != (StringInfo *) NULL)
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      continue;
    }
    /*
      Allocate layered image.
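      Each layer is cloned from the canvas image, so it inherits the
      depth, colorspace and colormap already negotiated from the file
      header.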
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL) {
      layer_info=DestroyLayerInfo(layer_info,number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of image for layer %.20g failed",(double) i);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
    if (layer_info[i].info != (StringInfo *) NULL) {
      (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
        layer_info[i].info,exception);
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
    }
  }
  if (image_info->ping != MagickFalse) {
    AttachPSDLayers(image,layer_info,number_layers);
    return(MagickTrue);
  }
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++) {
    if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info,
        image_info,++index) != MagickFalse)) {
      for (j=0; j < (ssize_t) layer_info[i].channels; j++) {
        if (DiscardBlobBytes(image,(MagickSizeType)
            layer_info[i].channel_info[j].size) == MagickFalse) {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,
            "UnexpectedEndOfFile",image->filename);
        }
      }
      continue;
    }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}

ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  PolicyDomain domain;
  PolicyRights rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}

static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType *sizes;
  MagickBooleanType status;
  PSDCompressionType compression;
  register ssize_t i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE)) {
    (void) ThrowMagickException(exception,GetMagickModule(),
      TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
    return(MagickFalse);
  }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE) {
    sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
    if (sizes == (MagickOffsetType *) NULL)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++) {
    ssize_t type;

    type=i;
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status !=
      MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

static Image *ReadPSDImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image *image;
  MagickBooleanType skip_layers;
  MagickOffsetType offset;
  MagickSizeType length;
  MagickBooleanType status;
  PSDInfo psd_info;
  register ssize_t i;
  size_t imageListLength;
  ssize_t count;
  StringInfo *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse) {
    image=DestroyImageList(image);
    return((Image *) NULL);
  }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode) {
    psd_info.min_channels=4;
    (void) SetImageColorspace(image,CMYKColorspace,exception);
  }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
           (psd_info.mode == DuotoneMode)) {
    if (psd_info.depth != 32) {
      status=AcquireImageColormap(image,MagickMin((size_t)
        (psd_info.depth < 16 ?
        256 : 65536),MaxColormapSize),exception);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
    }
    psd_info.min_channels=1;
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  }
  else if (psd_info.mode == IndexedMode)
    psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0) {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading colormap");
    if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) {
      /*
        Duotone image data; the format of this data is undocumented.
        32 bits per pixel; the colormap is ignored.
      */
      (void) SeekBlob(image,(MagickOffsetType) length,SEEK_CUR);
    }
    else {
      size_t number_colors;

      /*
        Read PSD raster colormap.
      */
      number_colors=(size_t) length/3;
      if (number_colors > 65536)
        ThrowReaderException(CorruptImageError,"ImproperImageHeader");
      if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      for (i=0; i < (ssize_t) image->colors; i++)
        image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
          (unsigned char) ReadBlobByte(image));
      for (i=0; i < (ssize_t) image->colors; i++)
        image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
          (unsigned char) ReadBlobByte(image));
      for (i=0; i < (ssize_t) image->colors; i++)
        image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
          (unsigned char) ReadBlobByte(image));
      image->alpha_trait=UndefinedPixelTrait;
    }
  }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0) {
    unsigned char *blocks;

    /*
      Image resources block.
    */
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading image resource blocks - %.20g bytes",(double)
        ((MagickOffsetType) length));
    if (length > GetBlobSize(image))
      ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
    blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
      sizeof(*blocks));
    if (blocks == (unsigned char *) NULL)
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    count=ReadBlob(image,(size_t) length,blocks);
    if ((count != (ssize_t) length) || (length < 4) ||
        (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) {
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
      ThrowReaderException(CorruptImageError,"ImproperImageHeader");
    }
    profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length,
      exception);
    blocks=(unsigned char *) RelinquishMagickMemory(blocks);
  }
  /*
    Layer and mask block.
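    The section opens with its total length (4 bytes for version 1
    files, 8 for version 2 via GetPSDSize); a total of exactly 8
    apparently encodes just two nested 4-byte lengths, the second of
    which is taken as the effective length below.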
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8) {
    length=ReadBlobMSBLong(image);
    length=ReadBlobMSBLong(image);
  }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse)) {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " read composite only");
    skip_layers=MagickTrue;
  }
  if (length == 0) {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " image has no layers");
  }
  else {
    if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
        exception) != MagickTrue) {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
    /*
      Skip the rest of the layer and mask information.
    */
    (void) SeekBlob(image,offset+length,SEEK_SET);
  }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse) {
    if (profile != (StringInfo *) NULL)
      profile=DestroyStringInfo(profile);
    ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
  }
  if (image_info->ping != MagickFalse) {
    if (profile != (StringInfo *) NULL)
      profile=DestroyStringInfo(profile);
    (void) CloseBlob(image);
    return(GetFirstImageInList(image));
  }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0)) {
    (void) SeekBlob(image,offset,SEEK_SET);
    status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
      exception);
    if (status != MagickTrue) {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  }
  if (psd_info.has_merged_image == MagickFalse) {
    Image *merged;

    if (imageListLength == 1) {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
    }
    image->background_color.alpha=(MagickRealType) TransparentAlpha;
    image->background_color.alpha_trait=BlendPixelTrait;
    (void) SetImageBackgroundColor(image,exception);
    merged=MergeImageLayers(image,FlattenLayer,exception);
    ReplaceImageInList(&image,merged);
  }
  if (profile != (StringInfo *) NULL) {
    Image *next;

    i=0;
    next=image;
    while (next != (Image *) NULL) {
      if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
        (void) SetImageProfile(next,GetStringInfoName(profile),profile,
          exception);
      next=next->next;
    }
    profile=DestroyStringInfo(profile);
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to the list of
%  supported formats.
%  The properties include the image format tag, a method to read and/or
%  write the format, whether the format supports the saving of more than one
%  frame to the same file or blob, whether the format supports native
%  in-memory I/O, and a brief description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo *entry;

  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the PSD module
%  from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image
%  format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
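%
%  A minimal caller sketch (illustrative only; the coder is normally reached
%  through WriteImage() rather than invoked directly, and error handling is
%  omitted here):
%
%      ImageInfo *image_info=CloneImageInfo((ImageInfo *) NULL);
%      Image *image= ... ;  (an image list obtained elsewhere)
%      (void) CopyMagickString(image->filename,"layers.psd",
%        MagickPathExtent);
%      if (WriteImage(image_info,image,exception) == MagickFalse)
%        CatchException(exception);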
%
*/

static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType current_offset;
  ssize_t result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType current_offset;
  ssize_t result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  result=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int count;
  register ssize_t i, j;
  register unsigned char *q;
  unsigned char *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; ) {
    switch (i) {
      case 1: {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2: {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3: {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) {
          *q++=(unsigned char) ((256-3)+1);
          *q++=(*pixels);
          break;
        }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default: {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) {
          /*
            Packed run.
          */
          count=3;
          while (((ssize_t) count < i) && (*pixels == *(pixels+count))) {
            count++;
            if (count >= 127)
              break;
          }
          i-=count;
          *q++=(unsigned char) ((256-count)+1);
          *q++=(*pixels);
          pixels+=count;
          break;
        }
        /*
          Literal run.
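          PackBits stores a literal run as a count byte n in [0,127]
          followed by n+1 verbatim bytes, which is why the header
          written below is count-1 for the count literals collected.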
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2))) {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t length;
  ssize_t i, y;

  if (compression == RLECompression) {
    length=(size_t) WriteBlobShort(image,RLE);
    for (i=0; i < channels; i++)
      for (y=0; y < (ssize_t) next_image->rows; y++)
        length+=SetPSDOffset(psd_info,image,0);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type,unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType monochrome;
  QuantumInfo *quantum_info;
  register const Quantum *p;
  register ssize_t i;
  size_t count, length;
  ssize_t y;
  unsigned char *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int flush, level;
  unsigned char *compressed_pixels;
  z_stream stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse) {
    size_offset=TellBlob(image)+2;
    count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
  }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression) {
    compressed_pixels=(unsigned char *) AcquireQuantumMemory(
      MagickMinBufferExtent,sizeof(*compressed_pixels));
    if (compressed_pixels == (unsigned char *) NULL) {
      quantum_info=DestroyQuantumInfo(quantum_info);
      return(0);
    }
    memset(&stream,0,sizeof(stream));
    stream.data_type=Z_BINARY;
    level=Z_DEFAULT_COMPRESSION;
    if ((image_info->quality > 0) && (image_info->quality < 10))
      level=(int) image_info->quality;
    if (deflateInit(&stream,level) != Z_OK) {
      quantum_info=DestroyQuantumInfo(quantum_info);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
      return(0);
    }
  }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++) {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression) {
      length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
        exception);
      count+=WriteBlob(image,length,compact_pixels);
      size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression) {
      stream.avail_in=(uInt) length;
      stream.next_in=(Bytef *) pixels;
      if (y == (ssize_t) next_image->rows-1)
        flush=Z_FINISH;
      do {
        stream.avail_out=(uInt) MagickMinBufferExtent;
        stream.next_out=(Bytef *) compressed_pixels;
        if (deflate(&stream,flush) == Z_STREAM_ERROR)
          break;
        length=(size_t) MagickMinBufferExtent-stream.avail_out;
        if (length > 0)
          count+=WriteBlob(image,length,compressed_pixels);
      } while (stream.avail_out == 0);
    }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression) {
    (void) deflateEnd(&stream);
    compressed_pixels=(unsigned char *) RelinquishMagickMemory(
      compressed_pixels);
  }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t packet_size;
  unsigned char *compact_pixels;

  packet_size=image->depth > 8UL ?
    2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL) {
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  }
  return(compact_pixels);
}

static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType compression;
  Image *mask;
  MagickOffsetType rows_offset;
  size_t channels, count, length, offset_length;
  unsigned char *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression) {
    compact_pixels=AcquireCompactPixels(next_image,exception);
    if (compact_pixels == (unsigned char *) NULL)
      return(0);
  }
  channels=1;
  if (separate == MagickFalse) {
    if ((next_image->storage_class != PseudoClass) ||
        (IsImageGray(next_image) != MagickFalse)) {
      if (IsImageGray(next_image) == MagickFalse)
        channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3);
      if (next_image->alpha_trait != UndefinedPixelTrait)
        channels++;
    }
    rows_offset=TellBlob(image)+2;
    count+=WriteCompressionStart(psd_info,image,next_image,compression,
      (ssize_t) channels);
    offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
  }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse)) {
    length=WritePSDChannel(psd_info,image_info,image,next_image,
      IndexQuantum,compact_pixels,rows_offset,separate,compression,
      exception);
    if (separate != MagickFalse)
      size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
    else
      rows_offset+=offset_length;
    count+=length;
  }
  else {
    if (IsImageGray(next_image) != MagickFalse) {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        GrayQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
    else {
      if (next_image->colorspace == CMYKColorspace)
        (void) NegateCMYK(next_image,exception);
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        RedQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        GreenQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        BlueQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
      if (next_image->colorspace == CMYKColorspace) {
        length=WritePSDChannel(psd_info,image_info,image,next_image,
          BlackQuantum,compact_pixels,rows_offset,separate,compression,
          exception);
        if (separate != MagickFalse)
          size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
        else
          rows_offset+=offset_length;
        count+=length;
      }
    }
    if (next_image->alpha_trait !=
        UndefinedPixelTrait) {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        AlphaQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse) {
    const char *property;

    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL) {
      mask=(Image *) GetImageRegistry(ImageRegistryType,property,
        exception);
      if (mask != (Image *) NULL) {
        if (compression == RLECompression) {
          compact_pixels=AcquireCompactPixels(mask,exception);
          if (compact_pixels == (unsigned char *) NULL)
            return(0);
        }
        length=WritePSDChannel(psd_info,image_info,image,mask,
          RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
          exception);
        (void) WritePSDSize(psd_info,image,length,size_offset);
        count+=length;
        compact_pixels=(unsigned char *) RelinquishMagickMemory(
          compact_pixels);
      }
    }
  }
  return(count);
}

static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t count, length;
  register ssize_t i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL) ? 255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else {
    count+=WriteBlobByte(image,(unsigned char) length);
    count+=WriteBlob(image,length,(const unsigned char *) value);
  }
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

static void WriteResolutionResourceBlock(Image *image)
{
  double x_resolution, y_resolution;
  unsigned short units;

  if (image->units == PixelsPerCentimeterResolution) {
    x_resolution=2.54*65536.0*image->resolution.x+0.5;
    y_resolution=2.54*65536.0*image->resolution.y+0.5;
    units=2;
  }
  else {
    x_resolution=65536.0*image->resolution.x+0.5;
    y_resolution=65536.0*image->resolution.y+0.5;
    units=1;
  }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* height unit */
}

static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t count;

  count=(size_t) WriteBlobShort(image,(unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char *p;
  size_t length;
  unsigned char *datum;
  unsigned int count, long_sans;
  unsigned short id, short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); ) {
    register unsigned char *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if
      (id == 0x0000040f) {
      ssize_t quantum;

      quantum=PSDQuantum(count)+12;
      if ((quantum >= 12) && (quantum < (ssize_t) length)) {
        if ((q+quantum < (datum+length-16)))
          (void) memmove(q,q+quantum,length-quantum-(q-datum));
        SetStringInfoLength(bim_profile,length-quantum);
      }
      break;
    }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char *p;
  size_t length;
  unsigned char *datum;
  unsigned int count, long_sans;
  unsigned short id, short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); ) {
    register unsigned char *q;
    ssize_t cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0) {
      (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
      SetStringInfoLength(bim_profile,length-(cnt+12));
      break;
    }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char key[PSDKeySize];
  /*
    Whitelist of keys from:
    https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
  */
  const char allowed[PSDAllowedLength][PSDKeySize] = {
    "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
    "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
    "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
    "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
  }, *option;
  const StringInfo *info;
  MagickBooleanType found;
  register size_t i;
  size_t remaining_length, length;
  StringInfo *profile;
  unsigned char *p;
  unsigned int size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0) {
    profile=RemoveImageProfile(image,"psd:additional-info");
    return(DestroyStringInfo(profile));
  }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  while (remaining_length >= 12) {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++) {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse) {
      if (remaining_length > 0)
        p=(unsigned char *) memmove(p-12,p+size,remaining_length);
      continue;
    }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}

static
MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
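/*
  A minimal standalone sketch, not ImageMagick code, restating the padding
  rule that WritePascalString above implements: one length byte, at most 255
  characters, then zero bytes until (length byte + characters) is a multiple
  of the requested padding (PSD layer names use padding == 4).  The function
  name pascal_string_size is hypothetical.
*/
#include <stdio.h>
#include <string.h>

static size_t pascal_string_size(const char *value, size_t padding)
{
  size_t length = strlen(value);
  if (length > 255)
    length = 255;              /* PSD Pascal strings cap the length at 255 */
  size_t total = length + 1;   /* +1 for the leading length byte */
  if ((total % padding) != 0)
    total += padding - (total % padding);  /* zero bytes up to the boundary */
  return total;
}

int main(void)
{
  /* "Layer 1" is 7 chars, +1 length byte = 8, already a multiple of 4 */
  printf("%zu\n", pascal_string_size("Layer 1", 4));    /* prints 8 */
  /* "Background" is 10 chars, +1 = 11, padded up to 12 */
  printf("%zu\n", pascal_string_size("Background", 4)); /* prints 12 */
  return 0;
}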
GB_Global.c
//------------------------------------------------------------------------------
// GB_Global: global values in GraphBLAS
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// All Global storage is declared, initialized, and accessed here.  The
// contents of the GB_Global struct are only accessible to functions in this
// file.  Global storage is used to keep track of the GraphBLAS mode (blocking
// or non-blocking), for pointers to malloc/calloc/realloc/free functions,
// global matrix options, and other settings.

#include "GB_atomics.h"

//------------------------------------------------------------------------------
// Global storage: for all threads in a user application that uses GraphBLAS
//------------------------------------------------------------------------------

typedef struct
{

    //--------------------------------------------------------------------------
    // blocking/non-blocking mode, set by GrB_init
    //--------------------------------------------------------------------------

    GrB_Mode mode ;             // GrB_NONBLOCKING or GrB_BLOCKING
    bool GrB_init_called ;      // true if GrB_init already called

    //--------------------------------------------------------------------------
    // threading control
    //--------------------------------------------------------------------------

    int nthreads_max ;          // max number of threads to use
    double chunk ;              // chunk size for determining # threads to use

    //--------------------------------------------------------------------------
    // hypersparsity and CSR/CSC format control
    //--------------------------------------------------------------------------

    float bitmap_switch [GxB_NBITMAP_SWITCH] ;  // default bitmap_switch
    float hyper_switch ;        // default hyper_switch for new matrices
    bool is_csc ;               // default CSR/CSC format for new matrices

    //--------------------------------------------------------------------------
    // abort function: only used for debugging
    //--------------------------------------------------------------------------

    void (* abort_function ) (void) ;

    //--------------------------------------------------------------------------
    // malloc/calloc/realloc/free: memory management functions
    //--------------------------------------------------------------------------

    // All threads must use the same malloc/calloc/realloc/free functions.
    // They default to the ANSI C11 functions, but can be defined by GxB_init.

    void * (* malloc_function ) (size_t) ;              // required
    // void * (* calloc_function ) (size_t, size_t) ;   // no longer used
    void * (* realloc_function ) (void *, size_t) ;     // may be NULL
    void (* free_function ) (void *) ;                  // required
    bool malloc_is_thread_safe ;   // default is true

    //--------------------------------------------------------------------------
    // memory usage tracking: for testing and debugging only
    //--------------------------------------------------------------------------

    // malloc_tracking: default is false.  There is no user-accessible API for
    // setting this to true.  If true, the following statistics are computed.
    // If false, all of the following are unused.

    // nmalloc: To aid in searching for memory leaks, GraphBLAS keeps track of
    // the number of blocks of allocated memory that have not yet been freed.
    // The count starts at zero.  GB_malloc_memory and GB_calloc_memory
    // increment this count, and free (of a non-NULL pointer) decrements it.
    // realloc increments the count if it is allocating a new block, but it
    // does this by calling GB_malloc_memory.

    // malloc_debug: this is used for testing only (GraphBLAS/Tcov).  If true,
    // then use malloc_debug_count for testing memory allocation and
    // out-of-memory conditions.  If malloc_debug_count > 0, the value is
    // decremented after each allocation of memory.  If malloc_debug_count <=
    // 0, the GB_*_memory routines pretend to fail, returning NULL and not
    // allocating anything.

    bool malloc_tracking ;          // true if allocations are being tracked
    int64_t nmalloc ;               // number of blocks allocated but not freed
    bool malloc_debug ;             // if true, test memory handling
    int64_t malloc_debug_count ;    // for testing memory handling

    //--------------------------------------------------------------------------
    // for testing and development
    //--------------------------------------------------------------------------

    int64_t hack [2] ;              // settings for testing/development only

    //--------------------------------------------------------------------------
    // diagnostic output
    //--------------------------------------------------------------------------

    bool burble ;                       // controls GBURBLE output
    GB_printf_function_t printf_func ;  // pointer to printf
    GB_flush_function_t flush_func ;    // pointer to flush

    //--------------------------------------------------------------------------
    // for MATLAB interface only
    //--------------------------------------------------------------------------

    bool print_one_based ;          // if true, print 1-based indices

    //--------------------------------------------------------------------------
    // timing: for code development only
    //--------------------------------------------------------------------------

    double timing [40] ;

    //--------------------------------------------------------------------------
    // for malloc debugging only
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    #define GB_MEMTABLE_SIZE 10000
    GB_void *memtable_p [GB_MEMTABLE_SIZE] ;
    size_t memtable_s [GB_MEMTABLE_SIZE] ;
    #endif
    int nmemtable ;

    //--------------------------------------------------------------------------
    // internal memory pool
    //--------------------------------------------------------------------------

    // free_pool [k] is a pointer to a linked list of freed blocks, all of size
    // exactly equal to 2^k.  The total number of blocks in the kth pool is
    // given by free_pool_nblocks [k], and the upper bound on this is given by
    // free_pool_limit [k].  If any additional blocks of size 2^k above that
    // limit are freed by GB_dealloc_memory, they are not placed in the pool,
    // but actually freed instead.
    void *free_pool [64] ;
    int64_t free_pool_nblocks [64] ;
    int64_t free_pool_limit [64] ;

    //--------------------------------------------------------------------------
    // CUDA (DRAFT: in progress)
    //--------------------------------------------------------------------------

    int gpu_count ;                 // # of GPUs in the system
    GrB_Desc_Value gpu_control ;    // always, never, or default
    double gpu_chunk ;              // min problem size for using a GPU
    // properties of each GPU:
    GB_cuda_device gpu_properties [GB_CUDA_MAX_GPUS] ;

}
GB_Global_struct ;

GB_PUBLIC GB_Global_struct GB_Global ;

GB_Global_struct GB_Global =
{

    // GraphBLAS mode
    .mode = GrB_NONBLOCKING,    // default is nonblocking

    // initialization flag
    .GrB_init_called = false,   // GrB_init has not yet been called

    // max number of threads and chunk size
    .nthreads_max = 1,
    .chunk = GB_CHUNK_DEFAULT,

    // min dimension            density
    #define GB_BITSWITCH_1          ((float) 0.04)
    #define GB_BITSWITCH_2          ((float) 0.05)
    #define GB_BITSWITCH_3_to_4     ((float) 0.06)
    #define GB_BITSWITCH_5_to_8     ((float) 0.08)
    #define GB_BITSWITCH_9_to_16    ((float) 0.10)
    #define GB_BITSWITCH_17_to_32   ((float) 0.20)
    #define GB_BITSWITCH_33_to_64   ((float) 0.30)
    #define GB_BITSWITCH_gt_than_64 ((float) 0.40)

    // default format
    .bitmap_switch = { GB_BITSWITCH_1, GB_BITSWITCH_2, GB_BITSWITCH_3_to_4,
        GB_BITSWITCH_5_to_8, GB_BITSWITCH_9_to_16, GB_BITSWITCH_17_to_32,
        GB_BITSWITCH_33_to_64, GB_BITSWITCH_gt_than_64 },

    .hyper_switch = GB_HYPER_SWITCH_DEFAULT,
    .is_csc = (GB_FORMAT_DEFAULT != GxB_BY_ROW),    // default is GxB_BY_ROW

    // abort function for debugging only
    .abort_function = abort,

    // malloc/calloc/realloc/free functions: default to ANSI C11 functions
    .malloc_function = malloc,
    // .calloc_function = NULL,     // no longer used
    .realloc_function = realloc,
    .free_function = free,
    .malloc_is_thread_safe = true,

    // malloc tracking, for testing, statistics, and debugging only
    .malloc_tracking = false,
    .nmalloc = 0,                   // memory block counter
    .malloc_debug = false,          // do not test memory handling
    .malloc_debug_count = 0,        // counter for testing memory handling

    // for testing and development only
    .hack = {0, 0},

    // diagnostics
    .burble = false,
    .printf_func = NULL,
    .flush_func = NULL,

    // for MATLAB interface only
    .print_one_based = false,       // if true, print 1-based indices

    .timing = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },

    // for malloc debugging only
    .nmemtable = 0,                 // memtable is empty

    // all free_pool lists start out empty
    .free_pool = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL },

    .free_pool_nblocks = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },

    // default limits on the number of free blocks in each list:
    .free_pool_limit = {
        0,      // size 2^0 = 1 byte   none
        0,      // size 2^1 = 2        none
        0,      // size 2^2 = 4        none
        16384,  // size 2^3 = 8        (2^14 blocks * 2^3  = 128 KB total)
        16384,  // size 2^4 = 16 bytes (2^14 blocks * 2^4  = 256 KB total)
        16384,  // size 2^5 = 32       (2^14 blocks * 2^5  = 512 KB total)
        16384,  // size 2^6 = 64       (2^14 blocks * 2^6  = 1 MB total)
        16384,  // size 2^7 = 128      (2^14 blocks * 2^7  = 2 MB total)
        16384,  // size 2^8 = 256      (2^14 blocks * 2^8  = 4 MB total)
        8192,   // size 2^9 = 512      (2^13 blocks * 2^9  = 4 MB total)
        4096,   // size 2^10 = 1 KB    (2^12 blocks * 2^10 = 4 MB total)
        2048,   // size 2^11 = 2 KB    (2^11 blocks * 2^11 = 4 MB total)
        1024,   // size 2^12 = 4 KB    (2^10 blocks * 2^12 = 4 MB total)
        512,    // size 2^13 = 8 KB    (2^9 blocks  * 2^13 = 4 MB total)
        256,    // size 2^14 = 16 KB   (2^8 blocks  * 2^14 = 4 MB total)
        128,    // size 2^15 = 32 KB   (2^7 blocks  * 2^15 = 4 MB total)
        64,     // size 2^16 = 64 KB   (2^6 blocks  * 2^16 = 4 MB total)
        32,     // size 2^17 = 128 KB  (2^5 blocks  * 2^17 = 4 MB total)
        16,     // size 2^18 = 256 KB  (2^4 blocks  * 2^18 = 4 MB total)
        8,      // size 2^19 = 512 KB  (2^3 blocks  * 2^19 = 4 MB total)

        // maximum total size = about 52 MB
        // by default, no blocks larger than 512 KB are kept in the free_pool

        0,      // size 2^20 = 1 MB
        0,      // size 2^21
        0,      // size 2^22
        0,      // size 2^23
        0,      // size 2^24
        0,      // size 2^25
        0,      // size 2^26
        0,      // size 2^27
        0,      // size 2^28
        0,      // size 2^29
        0,      // size 2^30 (1 GB)
        0,      // size 2^31
        0,      // size 2^32
        0,      // size 2^33
        0,      // size 2^34
        0,      // size 2^35
        0,      // size 2^36
        0,      // size 2^37
        0,      // size 2^38
        0,      // size 2^39

        // These larger sizes are of course unlikely to appear, but adding all
        // 64 possibilities means that the free_pool does not need to check an
        // upper bound.

        0,      // size 2^40 (1 TB)
        0,      // size 2^41
        0,      // size 2^42
        0,      // size 2^43
        0,      // size 2^44
        0,      // size 2^45
        0,      // size 2^46
        0,      // size 2^47
        0,      // size 2^48
        0,      // size 2^49
        0,      // size 2^50 (1 PB)
        0,      // size 2^51
        0,      // size 2^52
        0,      // size 2^53
        0,      // size 2^54
        0,      // size 2^55
        0,      // size 2^56
        0,      // size 2^57
        0,      // size 2^58
        0,      // size 2^59
        0,      // size 2^60 (1 exabyte)
        0,      // size 2^61
        0,      // size 2^62
        0 },    // size 2^63 (8 exabytes!)

    // CUDA environment (DRAFT: in progress)
    .gpu_count = 0,                     // # of GPUs in the system
    .gpu_control = GxB_DEFAULT,         // always, never, or default
    .gpu_chunk = GB_GPU_CHUNK_DEFAULT,  // min problem size for using a GPU

} ;

//==============================================================================
// GB_Global access functions
//==============================================================================

//------------------------------------------------------------------------------
// mode
//------------------------------------------------------------------------------

void GB_Global_mode_set (GrB_Mode mode)
{
    GB_Global.mode = mode ;
}

GrB_Mode GB_Global_mode_get (void)
{
    return (GB_Global.mode) ;
}

//------------------------------------------------------------------------------
// GrB_init_called
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_GrB_init_called_set (bool GrB_init_called)
{
    GB_Global.GrB_init_called = GrB_init_called ;
}

GB_PUBLIC
bool GB_Global_GrB_init_called_get (void)
{
    return (GB_Global.GrB_init_called) ;
}

//------------------------------------------------------------------------------
// nthreads_max
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_nthreads_max_set (int nthreads_max)
{
    GB_Global.nthreads_max = GB_IMAX (nthreads_max, 1) ;
}

GB_PUBLIC
int GB_Global_nthreads_max_get (void)
{
    return (GB_Global.nthreads_max) ;
}

//------------------------------------------------------------------------------
// OpenMP max_threads
//------------------------------------------------------------------------------

GB_PUBLIC
int GB_Global_omp_get_max_threads (void)
{
    return (GB_OPENMP_MAX_THREADS) ;
}

//------------------------------------------------------------------------------
// chunk
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_chunk_set (double chunk)
{
    if (chunk <= GxB_DEFAULT) chunk = GB_CHUNK_DEFAULT ;
    GB_Global.chunk = fmax (chunk, 1) ;
}

GB_PUBLIC
double GB_Global_chunk_get (void)
{
    return (GB_Global.chunk) ;
}

//------------------------------------------------------------------------------
// hyper_switch
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_hyper_switch_set (float hyper_switch)
{
    GB_Global.hyper_switch = hyper_switch ;
}

GB_PUBLIC
float GB_Global_hyper_switch_get (void)
{
    return (GB_Global.hyper_switch) ;
}

//------------------------------------------------------------------------------
// bitmap_switch
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_bitmap_switch_set (int k, float b)
{
    k = GB_IMAX (k, 0) ;
    k = GB_IMIN (k, 7) ;
    GB_Global.bitmap_switch [k] = b ;
}

GB_PUBLIC
float GB_Global_bitmap_switch_get (int k)
{
    k = GB_IMAX (k, 0) ;
    k = GB_IMIN (k, 7) ;
    return (GB_Global.bitmap_switch [k]) ;
}

GB_PUBLIC
float GB_Global_bitmap_switch_matrix_get (int64_t vlen, int64_t vdim)
{
    int64_t d = GB_IMIN (vlen, vdim) ;
    if (d <= 1) return (GB_Global.bitmap_switch [0]) ;
    if (d <= 2) return (GB_Global.bitmap_switch [1]) ;
    if (d <= 4) return (GB_Global.bitmap_switch [2]) ;
    if (d <= 8) return (GB_Global.bitmap_switch [3]) ;
    if (d <= 16) return (GB_Global.bitmap_switch [4]) ;
    if (d <= 32) return (GB_Global.bitmap_switch [5]) ;
    if (d <= 64) return (GB_Global.bitmap_switch [6]) ;
    return (GB_Global.bitmap_switch [7]) ;
}

GB_PUBLIC
void GB_Global_bitmap_switch_default (void)
{
    GB_Global.bitmap_switch [0] = GB_BITSWITCH_1 ;
    GB_Global.bitmap_switch [1] = GB_BITSWITCH_2 ;
    GB_Global.bitmap_switch [2] = GB_BITSWITCH_3_to_4 ;
    GB_Global.bitmap_switch [3] = GB_BITSWITCH_5_to_8 ;
    GB_Global.bitmap_switch [4] = GB_BITSWITCH_9_to_16 ;
    GB_Global.bitmap_switch [5] = GB_BITSWITCH_17_to_32 ;
    GB_Global.bitmap_switch [6] = GB_BITSWITCH_33_to_64 ;
    GB_Global.bitmap_switch [7] = GB_BITSWITCH_gt_than_64 ;
}

//------------------------------------------------------------------------------
// is_csc
//------------------------------------------------------------------------------

void GB_Global_is_csc_set (bool is_csc)
{
    GB_Global.is_csc = is_csc ;
}

bool GB_Global_is_csc_get (void)
{
    return (GB_Global.is_csc) ;
}

//------------------------------------------------------------------------------
// abort_function
//------------------------------------------------------------------------------

GB_PUBLIC
void GB_Global_abort_function_set (void (* abort_function) (void))
{
    GB_Global.abort_function = abort_function ;
}

GB_PUBLIC
void GB_Global_abort_function (void)
{
    GB_Global.abort_function ( ) ;
}

//------------------------------------------------------------------------------
// malloc debugging
//------------------------------------------------------------------------------

// These functions keep a separate record of the pointers to all allocated
// blocks of memory and their sizes, just for sanity checks.
GB_PUBLIC void GB_Global_memtable_dump (void) { #ifdef GB_DEBUG printf ("\nmemtable dump: %d nmalloc %ld\n", GB_Global.nmemtable, GB_Global.nmalloc) ; for (int k = 0 ; k < GB_Global.nmemtable ; k++) { printf (" %4d: %12p : %ld\n", k, GB_Global.memtable_p [k], GB_Global.memtable_s [k]) ; } #endif } GB_PUBLIC int GB_Global_memtable_n (void) { return (GB_Global.nmemtable) ; } GB_PUBLIC void GB_Global_memtable_clear (void) { GB_Global.nmemtable = 0 ; } // add a pointer to the table of malloc'd blocks GB_PUBLIC void GB_Global_memtable_add (void *p, size_t size) { #ifdef GB_DEBUG ASSERT ((p == NULL) == (size == 0)) ; if (p == NULL) return ; bool fail = false ; // printf ("memtable add %p size %ld\n", p, size) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; fail = (n > GB_MEMTABLE_SIZE) ; if (!fail) { for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { printf ("\nadd duplicate %p size %ld\n", p, size) ; GB_Global_memtable_dump ( ) ; printf ("Hey %d %p\n", i,p) ; fail = true ; break ; } } } if (!fail && p != NULL) { GB_Global.memtable_p [n] = p ; GB_Global.memtable_s [n] = size ; GB_Global.nmemtable++ ; } } ASSERT (!fail) ; // GB_Global_memtable_dump ( ) ; #endif } // get the size of a malloc'd block GB_PUBLIC size_t GB_Global_memtable_size (void *p) { size_t size = 0 ; #ifdef GB_DEBUG if (p == NULL) return (0) ; bool found = false ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { size = GB_Global.memtable_s [i] ; found = true ; break ; } } } if (!found) { printf ("\nFAIL: %p not found\n", p) ; GB_Global_memtable_dump ( ) ; ASSERT (0) ; } #endif return (size) ; } // test if a malloc'd block is in the table GB_PUBLIC bool GB_Global_memtable_find (void *p) { bool found = false ; #ifdef GB_DEBUG if (p == NULL) return (false) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { found = true ; break ; } } } #endif return (found) ; } // remove a pointer from the table of malloc'd blocks GB_PUBLIC void GB_Global_memtable_remove (void *p) { #ifdef GB_DEBUG if (p == NULL) return ; bool found = false ; // printf ("memtable remove %p ", p) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { // found p in the table; remove it // printf ("size %ld\n", GB_Global.memtable_s [i]) ; GB_Global.memtable_p [i] = GB_Global.memtable_p [n-1] ; GB_Global.memtable_s [i] = GB_Global.memtable_s [n-1] ; GB_Global.nmemtable -- ; found = true ; break ; } } } if (!found) { printf ("remove %p NOT FOUND\n", p) ; GB_Global_memtable_dump ( ) ; } ASSERT (found) ; // GB_Global_memtable_dump ( ) ; #endif } //------------------------------------------------------------------------------ // malloc_function //------------------------------------------------------------------------------ void GB_Global_malloc_function_set (void * (* malloc_function) (size_t)) { GB_Global.malloc_function = malloc_function ; } void * GB_Global_malloc_function (size_t size) { void *p = NULL ; if (GB_Global.malloc_is_thread_safe) { p = GB_Global.malloc_function (size) ; } else { #pragma omp critical(GB_malloc_protection) { p = GB_Global.malloc_function (size) ; } } #ifdef GB_DEBUG GB_Global_memtable_add (p, size) ; #endif return (p) ; } //------------------------------------------------------------------------------ // calloc_function: no longer used 
//------------------------------------------------------------------------------ // void GB_Global_calloc_function_set (void * (* calloc_function) (size_t, size_t)) // { // GB_Global.calloc_function = calloc_function ; // } // bool GB_Global_have_calloc_function (void) // { // return (GB_Global.calloc_function != NULL) ; // } // void * GB_Global_calloc_function (size_t count, size_t size) // { // void *p = NULL ; // if (GB_Global.malloc_is_thread_safe) // { // p = GB_Global.calloc_function (count, size) ; // } // else // { // #pragma omp critical(GB_malloc_protection) // { // p = GB_Global.calloc_function (count, size) ; // } // } // #ifdef GB_DEBUG // GB_Global_memtable_add (p, count * size) ; // #endif // return (p) ; // } //------------------------------------------------------------------------------ // realloc_function //------------------------------------------------------------------------------ void GB_Global_realloc_function_set ( void * (* realloc_function) (void *, size_t) ) { GB_Global.realloc_function = realloc_function ; } bool GB_Global_have_realloc_function (void) { return (GB_Global.realloc_function != NULL) ; } void * GB_Global_realloc_function (void *p, size_t size) { void *pnew = NULL ; if (GB_Global.malloc_is_thread_safe) { pnew = GB_Global.realloc_function (p, size) ; } else { #pragma omp critical(GB_malloc_protection) { pnew = GB_Global.realloc_function (p, size) ; } } #ifdef GB_DEBUG if (pnew != NULL) { GB_Global_memtable_remove (p) ; GB_Global_memtable_add (pnew, size) ; } #endif return (pnew) ; } //------------------------------------------------------------------------------ // free_function //------------------------------------------------------------------------------ void GB_Global_free_function_set (void (* free_function) (void *)) { GB_Global.free_function = free_function ; } void GB_Global_free_function (void *p) { if (GB_Global.malloc_is_thread_safe) { GB_Global.free_function (p) ; } else { #pragma omp critical(GB_malloc_protection) { GB_Global.free_function (p) ; } } #ifdef GB_DEBUG GB_Global_memtable_remove (p) ; #endif } //------------------------------------------------------------------------------ // malloc_is_thread_safe //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_is_thread_safe_set (bool malloc_is_thread_safe) { GB_Global.malloc_is_thread_safe = malloc_is_thread_safe ; } GB_PUBLIC bool GB_Global_malloc_is_thread_safe_get (void) { return (GB_Global.malloc_is_thread_safe) ; } //------------------------------------------------------------------------------ // malloc_tracking //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_tracking_set (bool malloc_tracking) { GB_Global.malloc_tracking = malloc_tracking ; } bool GB_Global_malloc_tracking_get (void) { return (GB_Global.malloc_tracking) ; } //------------------------------------------------------------------------------ // nmalloc //------------------------------------------------------------------------------ void GB_Global_nmalloc_clear (void) { GB_ATOMIC_WRITE GB_Global.nmalloc = 0 ; } GB_PUBLIC int64_t GB_Global_nmalloc_get (void) { int64_t nmalloc ; GB_ATOMIC_READ nmalloc = GB_Global.nmalloc ; return (nmalloc) ; } void GB_Global_nmalloc_increment (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc++ ; } GB_PUBLIC void GB_Global_nmalloc_decrement (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc-- ; } //------------------------------------------------------------------------------ 
// malloc_debug //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_debug_set (bool malloc_debug) { GB_ATOMIC_WRITE GB_Global.malloc_debug = malloc_debug ; } bool GB_Global_malloc_debug_get (void) { bool malloc_debug ; GB_ATOMIC_READ malloc_debug = GB_Global.malloc_debug ; return (malloc_debug) ; } //------------------------------------------------------------------------------ // malloc_debug_count //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_debug_count_set (int64_t malloc_debug_count) { GB_ATOMIC_WRITE GB_Global.malloc_debug_count = malloc_debug_count ; } bool GB_Global_malloc_debug_count_decrement (void) { GB_ATOMIC_UPDATE GB_Global.malloc_debug_count-- ; int64_t malloc_debug_count ; GB_ATOMIC_READ malloc_debug_count = GB_Global.malloc_debug_count ; return (malloc_debug_count <= 0) ; } //------------------------------------------------------------------------------ // hack: for setting an internal flag for testing and development only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_hack_set (int k, int64_t hack) { GB_Global.hack [k] = hack ; } GB_PUBLIC int64_t GB_Global_hack_get (int k) { return (GB_Global.hack [k]) ; } //------------------------------------------------------------------------------ // burble: for controlling the burble output //------------------------------------------------------------------------------ void GB_Global_burble_set (bool burble) { GB_Global.burble = burble ; } GB_PUBLIC bool GB_Global_burble_get (void) { return (GB_Global.burble) ; } GB_PUBLIC GB_printf_function_t GB_Global_printf_get ( ) { return (GB_Global.printf_func) ; } GB_PUBLIC GB_flush_function_t GB_Global_flush_get ( ) { return (GB_Global.flush_func) ; } GB_PUBLIC void GB_Global_printf_set (GB_printf_function_t pr_func) { GB_Global.printf_func = pr_func ; } GB_PUBLIC void GB_Global_flush_set (GB_flush_function_t fl_func) { GB_Global.flush_func = fl_func ; } //------------------------------------------------------------------------------ // for MATLAB interface only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_print_one_based_set (bool onebased) { GB_Global.print_one_based = onebased ; } GB_PUBLIC bool GB_Global_print_one_based_get (void) { return (GB_Global.print_one_based) ; } //------------------------------------------------------------------------------ // CUDA (DRAFT: in progress) //------------------------------------------------------------------------------ void GB_Global_gpu_control_set (GrB_Desc_Value gpu_control) { // set the GPU control to always, never, or default if (GB_Global.gpu_count > 0) { // one or more GPUs are available: set gpu_control to // always, never, or default. 
if (gpu_control == GxB_GPU_ALWAYS || gpu_control == GxB_GPU_NEVER) { GB_Global.gpu_control = gpu_control ; } else { GB_Global.gpu_control = GxB_DEFAULT ; } } else { // no GPUs available: never use a GPU GB_Global.gpu_control = GxB_GPU_NEVER ; } } GrB_Desc_Value GB_Global_gpu_control_get (void) { // get the GPU control parameter return (GB_Global.gpu_control) ; } void GB_Global_gpu_chunk_set (double gpu_chunk) { // set the GPU chunk factor if (gpu_chunk < 1) gpu_chunk = GB_GPU_CHUNK_DEFAULT ; GB_Global.gpu_chunk = gpu_chunk ; } double GB_Global_gpu_chunk_get (void) { // get the GPU chunk factor return (GB_Global.gpu_chunk) ; } bool GB_Global_gpu_count_set (bool enable_cuda) { // set the # of GPUs in the system; // this function is only called once, by GB_init. #if defined ( GBCUDA ) if (enable_cuda) { return (GB_cuda_get_device_count (&GB_Global.gpu_count)) ; } else #endif { // no GPUs available, or available but not requested GB_Global.gpu_count = 0 ; return (true) ; } } int GB_Global_gpu_count_get (void) { // get the # of GPUs in the system return (GB_Global.gpu_count) ; } #define GB_GPU_DEVICE_CHECK(error) \ if (device < 0 || device >= GB_Global.gpu_count) return (error) ; size_t GB_Global_gpu_memorysize_get (int device) { // get the memory size of a specific GPU GB_GPU_DEVICE_CHECK (0) ; // memory size zero if invalid GPU return (GB_Global.gpu_properties [device].total_global_memory) ; } int GB_Global_gpu_sm_get (int device) { // get the # of SMs in a specific GPU GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU return (GB_Global.gpu_properties [device].number_of_sms) ; } bool GB_Global_gpu_device_pool_size_set( int device, size_t size) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties [device].pool_size = (int) size ; return( true); } bool GB_Global_gpu_device_max_pool_size_set( int device, size_t size) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties[device].max_pool_size = (int) size ; return( true); } bool GB_Global_gpu_device_memory_resource_set( int device, void *resource) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties[device].memory_resource = resource; return( true); } void* GB_Global_gpu_device_memory_resource_get( int device ) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU return ( GB_Global.gpu_properties [device].memory_resource ) ; //NOTE: this returns a void*, needs to be cast to be used } bool GB_Global_gpu_device_properties_get (int device) { // get all properties of a specific GPU; // this function is only called once per GPU, by GB_init. 
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU #if defined ( GBCUDA ) return (GB_cuda_get_device_properties (device, &(GB_Global.gpu_properties [device]))) ; #else // if no GPUs exist, they cannot be queried return (false) ; #endif } //------------------------------------------------------------------------------ // timing: for code development only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_timing_clear_all (void) { for (int k = 0 ; k < 40 ; k++) { GB_Global.timing [k] = 0 ; } } GB_PUBLIC void GB_Global_timing_clear (int k) { GB_Global.timing [k] = 0 ; } GB_PUBLIC void GB_Global_timing_set (int k, double t) { GB_Global.timing [k] = t ; } GB_PUBLIC void GB_Global_timing_add (int k, double t) { GB_Global.timing [k] += t ; } GB_PUBLIC double GB_Global_timing_get (int k) { return (GB_Global.timing [k]) ; } //------------------------------------------------------------------------------ // free_pool: fast access to free memory blocks //------------------------------------------------------------------------------ // each free block contains a pointer to the next free block. This requires // the free block to be at least 8 bytes in size. #define GB_NEXT(p) ((void **) p) [0] // free_pool_init: initialize the free_pool GB_PUBLIC void GB_Global_free_pool_init (bool clear) { #pragma omp critical(GB_free_pool) { if (clear) { // clear the free pool for (int k = 0 ; k < 64 ; k++) { GB_Global.free_pool [k] = NULL ; GB_Global.free_pool_nblocks [k] = 0 ; } } // set the default free_pool_limit for (int k = 0 ; k < 64 ; k++) { GB_Global.free_pool_limit [k] = 0 ; } int64_t n = 16384 ; for (int k = 3 ; k <= 8 ; k++) { GB_Global.free_pool_limit [k] = n ; } for (int k = 9 ; k <= 19 ; k++) { n = n/2 ; GB_Global.free_pool_limit [k] = n ; } } } #ifdef GB_DEBUG // check if a block is valid static inline void GB_Global_free_pool_check (void *p, int k, char *where) { // check the size of the block // printf ("check %p\n", p) ; ASSERT (k >= 3 && k < 64) ; ASSERT (p != NULL) ; size_t size = GB_Global_memtable_size (p) ; ASSERT (size == ((size_t) 1) << k) ; } #endif // free_pool_get: get a block from the free_pool, or return NULL if none GB_PUBLIC void *GB_Global_free_pool_get (int k) { void *p = NULL ; ASSERT (k >= 3 && k < 64) ; #pragma omp critical(GB_free_pool) { p = GB_Global.free_pool [k] ; if (p != NULL) { // remove the block from the kth free_pool GB_Global.free_pool_nblocks [k]-- ; GB_Global.free_pool [k] = GB_NEXT (p) ; } } if (p != NULL) { // clear the next pointer inside the block, since the block needs // to be all zero // printf ("got %p k %d\n", p, k) ; #ifdef GB_DEBUG GB_Global_free_pool_check (p, k, "get") ; #endif // GB_Global_free_pool_dump (2) ; printf ("\ndid get\n\n") ; } return (p) ; } // free_pool_put: put a block in the free_pool, unless it is full GB_PUBLIC bool GB_Global_free_pool_put (void *p, int k) { #ifdef GB_DEBUG GB_Global_free_pool_check (p, k, "put") ; #endif bool returned_to_pool = false ; #pragma omp critical(GB_free_pool) { returned_to_pool = (GB_Global.free_pool_nblocks [k] < GB_Global.free_pool_limit [k]) ; if (returned_to_pool) { // add the block to the head of the free_pool list // printf ("put %p k %d\n", p, k) ; GB_Global.free_pool_nblocks [k]++ ; GB_NEXT (p) = GB_Global.free_pool [k] ; GB_Global.free_pool [k] = p ; } } // GB_Global_free_pool_dump (2) ; printf ("\ndid put\n\n") ; return (returned_to_pool) ; } // free_pool_dump: check the validity of the free_pool GB_PUBLIC void GB_Global_free_pool_dump (int 
pr) { #ifdef GB_DEBUG bool fail = false ; #pragma omp critical(GB_free_pool) { for (int k = 0 ; k < 64 && !fail ; k++) { int64_t nblocks = GB_Global.free_pool_nblocks [k] ; int64_t limit = GB_Global.free_pool_limit [k] ; if (nblocks != 0 && pr > 0) { printf ("pool %2d: %8ld blocks, %8ld limit\n", k, nblocks, limit) ; } int64_t nblocks_actual = 0 ; void *p = GB_Global.free_pool [k] ; for ( ; p != NULL && !fail ; p = GB_NEXT (p)) { if (pr > 1) printf (" %16p ", p) ; size_t size = GB_Global_memtable_size (p) ; if (pr > 1) printf ("size: %ld\n", size) ; nblocks_actual++ ; fail = fail || (size != ((size_t) 1) << k) ; if (fail && pr > 0) printf (" fail\n") ; fail = fail || (nblocks_actual > nblocks) ; } if (nblocks_actual != nblocks) { if (pr > 0) printf ("fail: # blocks %ld %ld\n", nblocks_actual, nblocks) ; fail = true ; } } } ASSERT (!fail) ; #endif } // free_pool_limit_get: get the limit on the # of blocks in the kth pool GB_PUBLIC int64_t GB_Global_free_pool_limit_get (int k) { int64_t nblocks = 0 ; if (k >= 3 && k < 64) { #pragma omp critical(GB_free_pool) { nblocks = GB_Global.free_pool_limit [k] ; } } return (nblocks) ; } // free_pool_limit_set: set the limit on the # of blocks in the kth pool GB_PUBLIC void GB_Global_free_pool_limit_set (int k, int64_t nblocks) { if (k >= 3 && k < 64) { #pragma omp critical(GB_free_pool) { GB_Global.free_pool_limit [k] = nblocks ; } } } // free_pool_nblocks_total: total # of blocks in free_pool (for debug only) GB_PUBLIC int64_t GB_Global_free_pool_nblocks_total (void) { int64_t nblocks = 0 ; #pragma omp critical(GB_free_pool) { for (int k = 0 ; k < 64 ; k++) { nblocks += GB_Global.free_pool_nblocks [k] ; } } return (nblocks) ; }
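/*
  A minimal standalone sketch, not GraphBLAS code, of the free_pool idea
  used above: freed blocks of size exactly 2^k are kept on a singly linked
  list, with the "next" pointer stored in the first 8 bytes of the block
  itself (the GB_NEXT macro), so the pool needs no extra metadata.  The
  names pool_head/pool_get/pool_put/POOL_NEXT are hypothetical stand-ins
  for GB_Global.free_pool, GB_Global_free_pool_get, GB_Global_free_pool_put,
  and GB_NEXT; the per-pool limits and the omp critical section that guard
  the real pool are omitted.
*/
#include <stdlib.h>

#define POOL_NEXT(p) (((void **) (p))[0])

static void *pool_head[64];   /* pool_head[k] holds blocks of size 2^k */

/* return a cached block of size 2^k, or NULL if the pool is empty */
static void *pool_get(int k)
{
  void *p = pool_head[k];
  if (p != NULL)
    pool_head[k] = POOL_NEXT(p);
  return p;
}

/* push a block of size 2^k (k >= 3, so the block can hold a pointer) */
static void pool_put(void *p, int k)
{
  POOL_NEXT(p) = pool_head[k];
  pool_head[k] = p;
}

int main(void)
{
  void *p = malloc(1 << 4);   /* a 16-byte block belongs in pool 4 */
  pool_put(p, 4);
  void *q = pool_get(4);      /* q == p: the block is recycled, not freed */
  free(q);
  return 0;
}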
GB_unaryop__lnot_fp32_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_bool // op(A') function: GB_tran__lnot_fp32_bool // C type: float // A type: bool // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_bool ( float *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
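/*
  A standalone sketch of what the macros above expand to for
  GB_unop__lnot_fp32_bool: each entry is cast from bool to float
  (GB_CASTING) and then negated with z = !(x != 0) (GB_OP), parallelized
  with a static OpenMP schedule.  The function name lnot_fp32_bool is
  hypothetical; the real kernel also handles the GB_DISABLE stub.
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void lnot_fp32_bool(float *Cx, const bool *Ax, int64_t anz,
                           int nthreads)
{
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++)
  {
    float z = (float) Ax[p];   /* GB_CASTING: bool -> float */
    Cx[p] = !(z != 0);         /* GB_OP: logical not */
  }
}

int main(void)
{
  bool A[4] = { true, false, true, false };
  float C[4];
  lnot_fp32_bool(C, A, 4, 2);
  for (int p = 0; p < 4; p++) printf("%g ", C[p]);  /* prints 0 1 0 1 */
  printf("\n");
  return 0;
}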
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 0; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) #define PARALLEL_A() { \ _Pragma("omp parallel num_threads(33) if (0)") \ { \ int tid = omp_get_thread_num(); \ int cs = N / omp_get_num_threads(); \ int lb = tid * cs; \ int ub = (tid+1)*cs; \ ub = ub > N ? N : ub; \ for (int i = lb; i < ub; i++) { \ A[i] = D[i]; \ } \ _Pragma("omp barrier") \ double sum = 0; \ for (int i = 1+tid; i < N; i++) { \ sum += A[i]; \ } \ _Pragma("omp barrier") \ A[tid] = sum; \ sum = 0; \ for (int i = 2+tid; i < N; i++) { \ sum += A[i]; \ } \ _Pragma("omp barrier") \ A[tid+1] = sum; \ _Pragma("omp barrier") \ B[tid] = A[tid]-A[tid+1]; \ } \ } #define BODY_B() { \ int tid = omp_get_thread_num(); \ int cs = N / omp_get_num_threads(); \ int lb = tid * cs; \ int ub = (tid+1)*cs; \ ub = ub > N ? N : ub; \ for (int i = lb; i < ub; i++) { \ A[i] = D[i]; \ } \ _Pragma("omp barrier") \ double sum = 0; \ for (int i = 1+tid; i < N; i++) { \ sum += A[i]; \ } \ _Pragma("omp barrier") \ A[tid] = sum; \ _Pragma("omp barrier") \ C[tid] = A[tid]-A[tid+1]-tid; \ if (tid < omp_get_num_threads()-1) B[tid] += C[tid]; \ } #define PARALLEL_B() { \ _Pragma("omp parallel num_threads(threads[0])") \ { \ BODY_B(); \ } \ } #define PARALLEL_B5() { PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() } #define BODY_NP() { \ _Pragma("omp parallel num_threads(16)") { \ int b = omp_get_thread_num()*16; \ _Pragma("omp parallel num_threads(16)") { \ int tid = omp_get_thread_num(); \ int cs = N / omp_get_num_threads(); \ int lb = tid * cs; \ int ub = (tid+1)*cs; \ ub = ub > N ? N : ub; \ for (int i = lb; i < ub; i++) { \ A[i] = D[i]; \ } \ _Pragma("omp barrier") \ double sum = 0; \ for (int i = 1+tid; i < N; i++) { \ sum += A[i]; \ } \ _Pragma("omp barrier") \ A[tid] = sum; \ _Pragma("omp barrier") \ C[tid] = A[tid]-A[tid+1]-tid; \ if (tid < omp_get_num_threads()-1) B[b+tid] += C[tid]; else B[b+tid]=0; \ } \ } \ } int main(void) { check_offloading(); double A[N+2], B[N+2], C[N+2], D[N+2], E[N+2]; INIT(); long cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int gpu_threads = 768; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; // // Test: Barrier in a serialized parallel region. // TESTD("omp target teams num_teams(1) thread_limit(32)", { PARALLEL_A() }, VERIFY(0, 1, B[i], i+1)); // // Test: Barrier in a parallel region. // for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) { int threads[1]; threads[0] = t; TESTD("omp target teams num_teams(1) thread_limit(max_threads)", { ZERO(B); PARALLEL_B5() }, VERIFY(0, threads[0]-1, B[i], 5)); } DUMP_SUCCESS(gpu_threads-max_threads); // // Test: Barrier in consecutive parallel regions with variable # of threads. // TESTD("omp target teams num_teams(1) thread_limit(max_threads)", { ZERO(B); for (int t = 32; t <= max_threads; t += 32) { int threads[1]; threads[0] = t; PARALLEL_B() } }, VERIFY(0, max_threads-1, B[i], (max_threads / 32) - (i+1) / 32)); // // Test: Single thread in target region. // TESTD("#pragma omp target", { ZERO(B); BODY_B() }, VERIFY(0, 1, C[i], 491535)); // // Test: Barrier in target parallel. // for (int t = 1; t <= max_threads; t += (t < 32) ? 
31 : 32) { ZERO(B); int threads; threads = t; TESTD("omp target parallel num_threads(threads)", { BODY_B(); }, VERIFY(0, t-1, B[i], (trial+1)*1.0)); } DUMP_SUCCESS(gpu_threads-max_threads); #if 0 // // Test: Barrier in nested parallel in target region. // if (!cpuExec) { ZERO(B); TEST({ BODY_NP(); }, VERIFY(0, 16*16, B[i], (i > 0 && (i+1) % 16 == 0 ? 0 : (trial+1)*1)) ); } else { DUMP_SUCCESS(1); } #endif DUMP_SUCCESS(1); // target parallel + parallel // target + simd // target/teams/parallel with varying numbers of threads }
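/*
  A standalone sketch, outside the TESTD harness, of the chunked work
  distribution plus barrier synchronization that BODY_B above relies on:
  each thread claims a contiguous chunk of N/T elements, and the barriers
  order the write phase before the cross-chunk read phase.  All names here
  are illustrative; the VERIFY bookkeeping is omitted.
*/
#include <omp.h>
#include <stdio.h>

#define N 992

int main(void)
{
  static double A[N], D[N];             /* static: zero-initialized */
  for (int i = 0; i < N; i++) D[i] = i;
  #pragma omp parallel num_threads(8)
  {
    int tid = omp_get_thread_num();
    int cs  = N / omp_get_num_threads(); /* chunk size per thread */
    int lb  = tid * cs;
    int ub  = (tid + 1) * cs;
    ub = ub > N ? N : ub;                /* clamp the upper bound */
    for (int i = lb; i < ub; i++)        /* phase 1: fill my chunk */
      A[i] = D[i];
    #pragma omp barrier                  /* all writes before any reads */
    double sum = 0;
    for (int i = 1 + tid; i < N; i++)    /* phase 2: read across chunks */
      sum += A[i];
    #pragma omp barrier                  /* all reads before overwriting A */
    A[tid] = sum;
  }
  printf("A[0] = %g\n", A[0]);  /* thread 0's sum of 1..991 = 491536 */
  return 0;
}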
GB_unaryop__minv_fp32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_uint8 // op(A') function: GB_tran__minv_fp32_uint8 // C type: float // A type: uint8_t // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_uint8 ( float *Cx, // Cx and Ax may be aliased uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
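/*
  A standalone sketch of the GB_DISABLE pattern used above: when any of the
  GxB_NO_* compile-time flags is set, the specialized kernel compiles down
  to a stub returning GrB_NO_VALUE, and the caller falls back to the
  generic, type-agnostic path (the file's own comment: "disable this
  operator and use the generic case").  NO_VALUE, SUCCESS, and DISABLE are
  illustrative stand-ins for GrB_NO_VALUE, GrB_SUCCESS, and GB_DISABLE.
*/
#include <stdio.h>

#define NO_VALUE 1
#define SUCCESS  0
#define DISABLE  0   /* flip to 1 to compile the kernel out */

static int minv_kernel(float *Cx, const unsigned char *Ax, long anz)
{
#if DISABLE
  (void) Cx; (void) Ax; (void) anz;
  return NO_VALUE;            /* tell the caller to use the generic path */
#else
  for (long p = 0; p < anz; p++)
    Cx[p] = 1.0F / Ax[p];     /* minv: Ax[p]==0 yields IEEE infinity */
  return SUCCESS;
#endif
}

int main(void)
{
  unsigned char A[3] = { 1, 2, 4 };
  float C[3];
  if (minv_kernel(C, A, 3) == SUCCESS)
    printf("%g %g %g\n", C[0], C[1], C[2]);   /* prints 1 0.5 0.25 */
  return 0;
}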
nn_par.c
#include <omp.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>

#define NROW 1024
#define NCOL NROW
#define TEST_RESULTS

struct timeval startTime;
struct timeval finishTime;
double timeIntervalLength;

//Input Array X
double inputArrayX [NROW][NCOL];
//Weights
double Weight [NROW][NCOL];
//Final output from SOFTMAX
double outputY [NROW];
//Total Sum
double total_sum;
//Ground Truth
double Ground [NROW];
//Loss
double Loss [NROW];
//Check
double val_sum;

int main(int argc, char* argv[])
{
	int i,j,k;

	//INITIALIZE ARRAYS
	for(i=0;i<NROW;i++)
	{
		outputY[i] = 0;
		Ground[i] = 1;
		for(j=0;j<NCOL;j++)
		{
			inputArrayX[i][j]= 0.5;
			Weight[i][j]= 0.4;
		}
	}
	total_sum = 0;

	//Get the start time
	gettimeofday(&startTime, NULL); /* START TIME */
	//=====================================================================
	/* Fork a team of threads giving them their own copies of variables */
	#pragma omp parallel shared(inputArrayX, Weight, outputY, total_sum)
	{
		/* Obtain thread number */
		int tid = omp_get_thread_num();
		int nthreads = omp_get_num_threads();
		//Y = RELU(AVE(XW))
		//Round-robin: thread tid handles rows tid, tid+nthreads, ...
		for (int i=tid; i<NROW; i+=nthreads)
		{
			for(int j=0; j<NCOL; j++)
			{
				double mysum = 0;
				for(int k=0; k<NCOL; k++)
				{
					mysum+=inputArrayX[i][k]*Weight[k][j];
				}
				outputY[i] = mysum;
				outputY[i] /= NCOL;
			}
			if (outputY[i] < 0){outputY[i] = 0;}
		}
	}
	//Y = SOFTMAX(Y)
	//Loss = Y - Ground
	for (int i =0; i < NROW; i++){
		outputY[i] = exp(outputY[i]);
		total_sum += outputY[i];
	}
	for (int i =0; i < NROW; i++){
		outputY[i] /= total_sum;
		Loss[i] = outputY[i] - Ground[i];
	}
	//=====================================================================
	//Get the end time
	gettimeofday(&finishTime, NULL); /* after time */

#ifdef TEST_RESULTS
	//CALCULATE TOTAL SUM [Just for verification]
	val_sum=0;
	for(i=0;i<NROW;i++)
	{
		val_sum+=(double)Loss[i];
	}
	printf("\nTotal Sum = %g\n",val_sum);
#endif

	//Calculate the interval length
	timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000
		+ (double)(finishTime.tv_usec-startTime.tv_usec);
	timeIntervalLength=timeIntervalLength/1000;

	//Print the interval length
	printf("Interval length: %g msec.\n", timeIntervalLength);
	return 0;
}
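/*
  The softmax above exponentiates outputY directly, which is fine for the
  bounded values this benchmark produces but overflows for large inputs.
  A common remedy, sketched here as a standalone function (not part of the
  benchmark), is to subtract the maximum before exponentiating; the result
  is mathematically identical.
*/
#include <math.h>
#include <stdio.h>

static void softmax(double *y, int n)
{
  double max = y[0], sum = 0;
  for (int i = 1; i < n; i++)
    if (y[i] > max) max = y[i];
  for (int i = 0; i < n; i++)
  {
    y[i] = exp(y[i] - max);   /* exponent is <= 0, so exp cannot overflow */
    sum += y[i];
  }
  for (int i = 0; i < n; i++)
    y[i] /= sum;
}

int main(void)
{
  double y[3] = { 1000.0, 1001.0, 1002.0 };  /* exp(1000) would overflow */
  softmax(y, 3);
  printf("%g %g %g\n", y[0], y[1], y[2]);    /* ~0.09 0.245 0.665 */
  return 0;
}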
GB_unop__identity_uint64_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint32) // op(A') function: GB (_unop_tran__identity_uint64_uint32) // C type: uint64_t // A type: uint32_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint32) ( uint64_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint32_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
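//------------------------------------------------------------------------------
// Annotation on the bitmap branch above: in the bitmap format the values
// array Ax has a slot for every matrix position, and Ab [p] != 0 marks the
// slots that hold actual entries. Because C->b is memcpy'd from A->b before
// this kernel runs, the loop only needs to apply the cast where Ab [p] is
// set; the remaining slots are skipped.
//------------------------------------------------------------------------------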
addn.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_ADDN_H_ #define MACE_KERNELS_ADDN_H_ #if defined(MACE_ENABLE_NEON) && defined(__aarch64__) #include <arm_neon.h> #endif #include <algorithm> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/kernel.h" namespace mace { namespace kernels { constexpr int kCostPerGroup = 1024; template <DeviceType D, typename T> struct AddNFunctor : OpKernel { explicit AddNFunctor(OpKernelContext *context) : OpKernel(context) {} MaceStatus operator()(const std::vector<const Tensor *> &input_tensors, Tensor *output_tensor, StatsFuture *future) { MACE_UNUSED(future); MACE_RETURN_IF_ERROR(output_tensor->ResizeLike(input_tensors[0])); index_t size = output_tensor->size(); Tensor::MappingGuard output_map(output_tensor); float *output_data = output_tensor->mutable_data<float>(); memset(output_data, 0, size * sizeof(float)); int n = input_tensors.size(); int64_t cost = size * n; int64_t groups = 1; if (cost > kCostPerGroup) { groups = cost / kCostPerGroup; } int64_t element_per_group = size / groups; std::vector<Tensor::MappingGuard> mappers; for (int64_t i = 0; i < n; ++i) { mappers.emplace_back(Tensor::MappingGuard(input_tensors[i])); } #pragma omp parallel for for (int64_t i = 0; i < size; i += element_per_group) { int64_t count = std::min(element_per_group, size - i); int nn = count >> 2; int remain = count - (nn << 2); for (int64_t j = 0; j < n; ++j) { const float *input_data = input_tensors[j]->data<float>(); const float *input_ptr = input_data + i; float *output_ptr = output_data + i; for (int k = 0; k < nn; ++k) { #if defined(MACE_ENABLE_NEON) && defined(__aarch64__) float32x4_t in = vld1q_f32(input_ptr); float32x4_t out = vld1q_f32(output_ptr); out = vaddq_f32(out, in); vst1q_f32(output_ptr, out); #else for (int m = 0; m < 4; ++m) { output_ptr[m] += input_ptr[m]; } #endif input_ptr += 4; output_ptr += 4; } for (int k = 0; k < remain; ++k) { *output_ptr += *input_ptr; ++input_ptr; ++output_ptr; } } } return MACE_SUCCESS; } }; #ifdef MACE_ENABLE_OPENCL class OpenCLAddNKernel { public: virtual MaceStatus Compute( OpKernelContext *context, const std::vector<const Tensor *> &input_tensors, Tensor *output_tensor, StatsFuture *future) = 0; MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLAddNKernel); }; template <typename T> struct AddNFunctor<DeviceType::GPU, T> : OpKernel { explicit AddNFunctor(OpKernelContext *context); MaceStatus operator()(const std::vector<const Tensor *> &input_tensors, Tensor *output_tensor, StatsFuture *future); std::unique_ptr<OpenCLAddNKernel> kernel_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_ADDN_H_
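// Annotation: a worked example of the cost-based grouping above, with
// assumed sizes. For n = 4 input tensors of size = 4096 elements each:
//   cost              = size * n      = 16384  (> kCostPerGroup)
//   groups            = cost / 1024   = 16
//   element_per_group = size / groups = 256
// so each iteration of the parallel loop handles one 256-element slice,
// and the std::min(element_per_group, size - i) guard absorbs any
// remainder when groups does not divide size evenly.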
gmx_amide.c
/* gmx_amide.c is the main source file for the program g_amide, used for estimating * site energies, coupling constants, and transition dipole moment parameters * for Amide I vibrations of protein and peptides systems. * * Written by Mike Reppert (2015) */ #include "gmx_amide_map.h" #include "gmx_amide_bond.h" #include "gmx_amide_charge.h" #include "mem_helper.h" #include "statutil.h" #include "copyrite.h" #include "sysstuff.h" #include "txtdump.h" #include "futil.h" #include "tpxio.h" #include "physics.h" #include "macros.h" #include "gmx_fatal.h" #include "index.h" #include "smalloc.h" #include "vec.h" #include "xvgr.h" #include "gstat.h" #include "string2.h" #include "pbc.h" #include "bondf.h" #if OMP_PARALLEL #include "omp.h" #endif /******************************************************************************** * Site Assignments * ********************************************************************************/ int find_map_sites( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds ) { int b,i,j,at,at0; for(b=0; b<bonds; b++) { p_pb[b].MapAtoms = (int**) malloc(map.nsites*sizeof(int*)); if(p_pb[b].MapAtoms==NULL) { printf("Error allocating memory for MapAtoms of bond %d.\n", b+1); return 0; } p_pb[b].nsites = map.nsites; p_pb[b].natoms = (int*) malloc(map.nsites*sizeof(int)); if(p_pb[b].natoms==NULL) { printf("Error allocating memory for AtomSite numbers of bond %d.\n", b+1); return 0; } for(i=0; i<map.nsites; i++) p_pb[b].natoms[i] = 0; for(i=0; i<map.nsites; i++) { p_pb[b].MapAtoms[i] = (int*) malloc(map.MapSites[i].natoms*sizeof(int)); if(p_pb[b].MapAtoms[i]==NULL) { printf("Error allocating memory for MapAtoms of bond %d, map site %d.\n", b+1, i+1); return 0; } p_pb[b].natoms[i] = map.MapSites[i].natoms; for(j=0; j<map.MapSites[i].natoms; j++) { at0 = -1; if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.MapSites[i].AtomPaths[j].Path[0], "H", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding mapping atoms. Mapping atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.MapSites[i].AtomPaths[j].Path[0]); return 0; } // Should there be an error-catch here to be sure that at has been assigned? 
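			// Annotation: trace_path returns a negative index when a path
			// cannot be completed (see find_excluded_atoms below), so the
			// question above could be answered with a guard after the call,
			// along the lines of
			//   if(at<0) { printf("Incomplete map atom path for bond %d.\n", b+1); return 0; }
			// (suggested check, not present in this code).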
at = trace_path(&pbc, top, x, at0, map.MapSites[i].AtomPaths[j]); p_pb[b].MapAtoms[i][j] = at; } } } return 1; } int find_coup_sites( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds ) { int b,i,j,at,at0; for(b=0; b<bonds; b++) { p_pb[b].CoupAtoms = (int**) malloc(map.ncoupsites*sizeof(int*)); if(p_pb[b].CoupAtoms==NULL) { printf("Error allocating memory for CoupAtoms of bond %d.\n", b+1); return 0; } p_pb[b].ncoupsites = map.ncoupsites; p_pb[b].ncoupatoms = (int*) malloc(map.ncoupsites*sizeof(int)); if(p_pb[b].ncoupatoms==NULL) { printf("Error allocating memory for CoupSite numbers of bond %d.\n", b+1); return 0; } for(i=0; i<map.ncoupsites; i++) p_pb[b].ncoupatoms[i] = 0; for(i=0; i<map.ncoupsites; i++) { p_pb[b].CoupAtoms[i] = (int*) malloc(map.CoupSites[i].natoms*sizeof(int)); if(p_pb[b].CoupAtoms[i]==NULL) { printf("Error allocating memory for CoupAtoms of bond %d, coupling site %d.\n", b+1, i+1); return 0; } p_pb[b].ncoupatoms[i] = map.CoupSites[i].natoms; for(j=0; j<map.CoupSites[i].natoms; j++) { at0 = -1; if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.CoupSites[i].AtomPaths[j].Path[0], "H", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding coupling site atoms. Coupling atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.CoupSites[i].AtomPaths[j].Path[0]); return 0; } // Should there be an error-catch here to be sure that at has been assigned? at = trace_path(&pbc, top, x, at0, map.CoupSites[i].AtomPaths[j]); p_pb[b].CoupAtoms[i][j] = at; } } } return 1; } // Identify excluded atoms for each amide bond. int find_excluded_atoms( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds) { int b,i,at,at0; for(b=0; b<bonds; b++) { p_pb[b].nexcluded = 0; int nfound = 0; int nalloc = 0; int* ExArray; // Our initial guess is that there are just map.npaths excluded atoms. Could be more or less. ExArray = (int*) malloc(map.npaths*sizeof(int)); nalloc = map.npaths; if(ExArray==NULL) { printf("Error allocating memory for excluded atom indices of bond %d.\n", b+1); return 0; } for(i=0; i<map.npaths; i++) { at0 = -1; // First, identify the starting point for the path, i.e. the index of the atom // corresponding to the first path entry. if(strncmp(map.ExcludedAtomPaths[i].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "H", 6)==0) at0 = p_pb[b].H; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "CD", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding excluded atoms. Excluded atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.ExcludedAtomPaths[i].Path[0]); return 0; } // Next, trace the atom indices from at0 to the end of the path. // If the path is incomplete (which it will be most of the time) // at<0 is returned. at = trace_path(&pbc, top, x, at0, map.ExcludedAtomPaths[i]); if(at>=0) { int oldat; int isnew = 1; // If we find a new atom, first check if it's alredy included in ExArray. // If not, go ahead and add it. 
for(oldat=0; oldat<nfound; oldat++) { if(ExArray[oldat]==at) isnew = 0; } if(isnew) { ExArray[nfound] = at; nfound++; } } } // Now trim the allocated memory to keep only the indices for found atoms. p_pb[b].Excluded = (int*) realloc(ExArray, nfound*sizeof(int)); if( (p_pb[b].Excluded==NULL) && (nfound!=0) ) { printf("Error trimming excess memory from protein bond %d excluded atoms array.\n", b+1); p_pb[b].nexcluded = 0; return 0; } p_pb[b].nexcluded = nfound; } return 1; } // Identify Coupling sites for each amide bond int find_coupling_sites( t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds) { int b,i,at,at0; for(b=0; b<bonds; b++) { p_pb[b].nexcluded = 0; int nfound = 0; int nalloc = 0; int* ExArray; // Our initial guess is that there are just map.npaths excluded atoms. Could be more or less. ExArray = (int*) malloc(map.npaths*sizeof(int)); nalloc = map.npaths; if(ExArray==NULL) { printf("Error allocating memory for excluded atom indices of bond %d.\n", b+1); return 0; } for(i=0; i<map.npaths; i++) { at0 = -1; // First, identify the starting point for the path, i.e. the index of the atom // corresponding to the first path entry. if(strncmp(map.ExcludedAtomPaths[i].Path[0], "C", 6)==0) at0 = p_pb[b].C; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "O", 6)==0) at0 = p_pb[b].O; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "N", 6)==0) at0 = p_pb[b].N; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "H", 6)==0) at0 = p_pb[b].H; else if(strncmp(map.ExcludedAtomPaths[i].Path[0], "CD", 6)==0) at0 = p_pb[b].H; if(at0==-1) { printf("Error finding excluded atoms. Excluded atom path #%d starts at ", i); printf("an atom (%s) not found in the peptide bond.\n", map.ExcludedAtomPaths[i].Path[0]); return 0; } // Next, trace the atom indices from at0 to the end of the path. // If the path is incomplete (which it will be most of the time) // at<0 is returned. at = trace_path(&pbc, top, x, at0, map.ExcludedAtomPaths[i]); if(at>=0) { int oldat; int isnew = 1; // If we find a new atom, first check if it's alredy included in ExArray. // If not, go ahead and add it. for(oldat=0; oldat<nfound; oldat++) { if(ExArray[oldat]==at) isnew = 0; } if(isnew) { ExArray[nfound] = at; nfound++; } } } // Now trim the allocated memory to keep only the indices for found atoms. 
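		// Annotation: realloc(ptr, 0) may free ptr and return NULL without
		// that being an allocation failure, which is why the NULL check
		// below is qualified with (nfound!=0); an empty exclusion list is a
		// legal outcome here.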
p_pb[b].Excluded = (int*) realloc(ExArray, nfound*sizeof(int)); if( (p_pb[b].Excluded==NULL) && (nfound!=0) ) { printf("Error trimming excess memory from protein bond %d excluded atoms array.\n", b+1); p_pb[b].nexcluded = 0; return 0; } p_pb[b].nexcluded = nfound; } return 1; } /******************************************************************************** * Memory Allocation * ********************************************************************************/ int set_arrays(real ****p_elecData, real ****p_ProElecData, int nelec, real ***p_angleData, real ***p_ProAngleData, int nangles, real ***p_freqData, real ***p_ProFreqData, int nfreq, real ****p_coupData, int ncoup, rvec ***p_coordData, rvec ***p_ProCoordData, rvec **p_dipData, rvec **p_centData, matrix **p_RotMat, matrix **p_ProRotMat, int bonds, int nsites, int nPro, int nProSites) { int error = 0; int i,ix; *p_coordData = (rvec**) malloc(bonds*sizeof(rvec*)); if(*p_coordData==NULL) error = 1; else { for(i=0; i<bonds; i++) { *(*p_coordData+i) = (rvec*) malloc(nsites*sizeof(rvec)); if(*(*p_coordData+i)==NULL) { error = 1; break; } } if(error) { if(i<nsites) { for(ix=0; ix<i; ix++) free(*(*p_coordData+ix)); free(*p_coordData); } printf("Error allocating memory for Coordinate data array.\n"); } } if(!error) { *p_dipData = (rvec*) malloc(bonds*sizeof(rvec)); if(*p_dipData==NULL) { error = 2; printf("Error allocating memory for Dipole moment data.\n"); } } if(!error) { *p_centData = (rvec*) malloc(bonds*sizeof(rvec)); if(*p_centData==NULL) { error = 3; printf("Error allocating memory for bond Center data.\n"); } } if(!error) { *p_RotMat = (matrix*) malloc(bonds*sizeof(matrix)); if(*p_RotMat==NULL) { error = 4; printf("Error allocating memory for Rotation Matrix data.\n"); } } if(!error) { if(!allocate_3d_array_real(p_elecData, bonds, nsites, nelec)) { printf("Error allocating Electrostatic variable array\n"); error = 5; } else if(!allocate_2d_array_real(p_angleData, bonds, nangles)) { printf("Error allocating Angles variable array\n"); error = 6; } else if(!allocate_2d_array_real(p_freqData, bonds, nfreq)) { printf("Error allocating Frequency variable array\n"); error = 7; } else if(!allocate_3d_array_real(p_coupData, bonds, bonds, ncoup)) { printf("Error allocating Coupling variable array\n"); error = 8; } } if(!error) { if(nProSites!=0) { if(!allocate_3d_array_real(p_ProElecData, nPro, nProSites, nelec)) { printf("Error allocating Proline Electrostatic variable array\n"); error = 9; } else if(!allocate_2d_array_real(p_ProAngleData, nPro, nangles)) { printf("Error allocating Proline Angles variable array\n"); error = 10; } else if(!allocate_2d_array_real(p_ProFreqData, nPro, nfreq)) { printf("Error allocating Proline Frequency variable array\n"); error = 11; } } *p_ProCoordData = (rvec**) malloc(nPro*sizeof(rvec*)); if(*p_ProCoordData==NULL) error = 12; else { for(i=0; i<nPro; i++) { *(*p_ProCoordData+i) = (rvec*) malloc(nProSites*sizeof(rvec)); if(*(*p_ProCoordData+i)==NULL) { error = 13; break; } } if(error) { if(i<nProSites) { for(ix=0; ix<i; ix++) free(*(*p_ProCoordData+ix)); free(*p_ProCoordData); } printf("Error allocating memory for Proline Coordinate data array.\n"); } } if(!error) { *p_ProRotMat = (matrix*) malloc(nPro*sizeof(matrix)); if(*p_ProRotMat==NULL) { error = 14; printf("Error allocating memory for Proline Rotation Matrix data.\n"); } } } if(!error) return 1; else { if(error>1) { for(i=0; i<bonds; i++) free(*(*p_coordData+i)); free(*p_coordData); } if(error>2) free(*p_dipData); if(error>3) free(*p_centData); 
if(error>4) free(*p_RotMat); if(error>5) free_3d_array_real(*p_elecData, bonds, nsites, nelec); if(error>6) free_2d_array_real(*p_angleData, bonds, nangles); if(error>7) free_2d_array_real(*p_freqData, bonds, nfreq); if(error>8) free_3d_array_real(*p_coupData, bonds, bonds, nfreq); if(error>9) free_3d_array_real(*p_ProElecData, nPro, nProSites, nelec); if(error>10) free_2d_array_real(*p_ProAngleData, nPro, nangles); if(error>11) free_2d_array_real(*p_ProFreqData, nPro, nfreq); if(error>12) { for(i=0; i<nPro; i++) free(*(*p_ProCoordData+i)); free(*p_ProCoordData); } if(error>13) free(*p_ProRotMat); return 0; } } int unset_arrays(real ***elecData, real ***ProElecData, int nelec, real **angleData, real **ProAngleData, int nangles, real **freqData, real **ProFreqData, int nfreq, real ***coupData, int ncoup, rvec **coordData, rvec **ProCoordData, rvec *dipData, rvec *centData, matrix *RotMat, matrix *ProRotMat, int bonds, int nsites, int nPro, int nProSites) { int i; free_3d_array_real(elecData, bonds, nsites, nelec); free_2d_array_real(angleData, bonds, nangles); free_2d_array_real(freqData, bonds, nfreq); free_3d_array_real(coupData, bonds, bonds, ncoup); for(i=0; i<bonds; i++) free(coordData[i]); free(dipData); free(centData); free(RotMat); if(nProSites!=0) { free_2d_array_real(ProFreqData, nPro, nfreq); free_3d_array_real(ProElecData, nPro, nProSites, nelec); free_2d_array_real(ProAngleData, nPro, nangles); free(ProRotMat); } return 1; } /******************************************************************************** * Parameters * ********************************************************************************/ int get_angles(t_pbc* p_pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, int nangles) { const real rad2deg = 57.29578; rvec r_ij,r_jk,r_kl,m,n; real sign; int t1,t2,t3; int b; t_protbond pb; for(b=0; b<bonds; b++) { pb = p_pb[b]; if(pb.dih_set_N) { angleData[b][0] = rad2deg*dih_angle(x[pb.phiN[0]], x[pb.phiN[1]], x[pb.phiN[2]], x[pb.phiN[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3); angleData[b][1] = rad2deg*dih_angle(x[pb.psiN[0]], x[pb.psiN[1]], x[pb.psiN[2]], x[pb.psiN[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3); } else { angleData[b][0] = -1000.0; angleData[b][1] = -1000.0; } if(pb.dih_set_C) { angleData[b][2] = rad2deg*dih_angle(x[pb.phiC[0]], x[pb.phiC[1]], x[pb.phiC[2]], x[pb.phiC[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3); angleData[b][3] = rad2deg*dih_angle(x[pb.psiC[0]], x[pb.psiC[1]], x[pb.psiC[2]], x[pb.psiC[3]], p_pbc, r_ij, r_jk, r_kl, m, n, &sign, &t1, &t2, &t3); } else { angleData[b][2] = -1000.0; angleData[b][3] = -1000.0; } } return 1; } real get_NN_val(real phi, real psi, real **data, int dim1, int dim2) { real val; real f_phi, f_psi; real start = -180.0; real stop = 180.0; real step1 = (stop - start) / ( (real) (dim1-1) ); real step2 = (stop - start) / ( (real) (dim2-1) ); int phi_ndx = (int) ( (phi-start)/step1 ); int psi_ndx = (int) ( (psi-start)/step2 ); f_phi = (phi - (start+phi_ndx*step1) ) / step1; f_psi = (psi - (start+psi_ndx*step2) ) / step2; if(phi_ndx==-1) phi_ndx = dim1-2; if(psi_ndx==-1) psi_ndx = dim2-2; if(phi_ndx==dim1-1) phi_ndx = 0; if(psi_ndx==dim2-1) psi_ndx = 0; val = 0.0; if( (phi_ndx>=0) && (psi_ndx>=0) && (phi_ndx<dim1) && (psi_ndx<dim2)) { val += (1-f_phi)*((1-f_psi)*data[phi_ndx][psi_ndx] + f_psi*data[phi_ndx][psi_ndx+1] ); val += f_phi * ((1-f_psi)*data[phi_ndx+1][psi_ndx]+f_psi*data[phi_ndx+1][psi_ndx+1] ); } return val; } int get_coordinates(t_pbc 
pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat, rvec **coordData) { int b,i,j; rvec x_ax, y_ax, z_ax, CN_vec; rvec dx; for(b=0; b<bonds; b++) { // RotMat holds the molecular coordinate unit vectors pbc_dx(&pbc, x[p_pb[b].O], x[p_pb[b].C], x_ax); unitv(x_ax, x_ax); pbc_dx(&pbc, x[p_pb[b].N], x[p_pb[b].C], CN_vec); cprod(x_ax, CN_vec, z_ax); unitv(z_ax, z_ax); cprod(z_ax, x_ax, y_ax); unitv(y_ax, y_ax); for(i=0; i<3; i++) { RotMat[b][i][0] = x_ax[i]; RotMat[b][i][1] = y_ax[i]; RotMat[b][i][2] = z_ax[i]; } clear_rvec(dx); for(i=0; i<map.nsites; i++) { clear_rvec(coordData[b][i]); // MapAtoms[i][0] is our reference point. copy_rvec(x[p_pb[b].MapAtoms[i][0]], coordData[b][i]); for(j=1; j<map.MapSites[i].natoms; j++) { pbc_dx(&pbc, x[p_pb[b].MapAtoms[i][j]], x[p_pb[b].MapAtoms[i][0]], dx); svmul((1.0)/map.MapSites[i].natoms, dx, dx); rvec_inc(coordData[b][i], dx); } // Incorrect PBC treatment at boundaries. Replaced 08/25/2014 by MER. //clear_rvec(coordData[b][i]); //for(j=0; j<map.MapSites[i].natoms; j++) rvec_inc(coordData[b][i], x[p_pb[b].MapAtoms[i][j]]); //svmul((1.0)/map.MapSites[i].natoms, coordData[b][i], coordData[b][i]); // // The lines below compare PBC-corrected and uncorrected values. //rvec x0; //clear_rvec(x0); //for(j=0; j<map.MapSites[i].natoms; j++) rvec_inc(x0, x[p_pb[b].MapAtoms[i][j]]); //svmul((1.0)/map.MapSites[i].natoms, x0, x0); //rvec_sub(x0, coordData[b][i], dx); //printf("PBC - NoPBC: (%6.10f, %6.10f, %6.10f)\n", dx[0], dx[1], dx[2]); } } return 1; } int get_electrostatics(t_pbc pbc, t_topology top, rvec *x, t_amide_map map, t_protbond *p_pb, matrix *RotMat, int bonds, rvec **coordData, real ***elecData, int nelec, int nthreads, real cutoff, int nchunks, int* START, int* STOP) { int b; int natoms = top.atoms.nr; real const nm2bohr = 18.8973; real bcutoff = cutoff*nm2bohr; int error = 0; #if OMP_PARALLEL omp_set_num_threads(nthreads); #pragma omp parallel for \ shared(natoms, pbc, top, x, map, p_pb, RotMat, bonds, \ coordData, elecData, nelec, nchunks, START, STOP) #endif for(b=0; b<bonds; b++) { int i,j,at, k; // k for looping chunks real d, invd, invd2, invd3, invd5; real q; real DX, DY, DZ; rvec x_ax, y_ax, z_ax; rvec dx; for(i=0; i<3; i++) { x_ax[i] = RotMat[b][i][0]; y_ax[i] = RotMat[b][i][1]; z_ax[i] = RotMat[b][i][2]; } for(i=0; i<nelec; i++) { if(map.elec_used[i]) { for(j=0; j<map.nsites; j++) elecData[b][j][i] = 0.0; } } for(at=0; at<natoms; at++) { int good = 1; // cjfeng 08/27/2016 if(nchunks) { // Check if we need to include only part of bath for(k=0; k<nchunks; k++) { if(at<START[k]) { // less than the starting point good = 0; } else if(at>STOP[k]) { // more than the ending point good = 0; } else { // Within one of the region good = 1; break; } } } for(i=0; i<p_pb[b].nexcluded; i++) { if(at==p_pb[b].Excluded[i]) { good = 0; break; } } if(good) { for(i=0; i<map.nsites; i++) { pbc_dx(&pbc, coordData[b][i], x[at], dx); //if((b==55)) if(i==0) printf("%6.10f\t%6.10f\t%6.10f\n", dx[0], dx[1], dx[2]); svmul(nm2bohr, dx, dx); d = norm(dx); if( (d!=0.0) && (d<bcutoff) ) { DX = iprod(x_ax, dx); DY = iprod(y_ax, dx); DZ = iprod(z_ax, dx); invd = (1.0)/d; invd2 = invd*invd; invd3 = invd*invd2; invd5 = invd2*invd3; q = top.atoms.atom[at].q; if(map.elec_used[0]) elecData[b][i][0] += q * invd; // Potential if(map.elec_used[1]) elecData[b][i][1] += ( q * DX ) * ( invd3 ); // x-field if(map.elec_used[2]) elecData[b][i][2] += ( q * DY ) * ( invd3 ); // y-field if(map.elec_used[3]) elecData[b][i][3] += ( q * DZ ) * ( invd3 ); // 
z-field if(map.elec_used[4]) elecData[b][i][4] += ( q*invd3 ) * ( 1 - 3*DX*DX*invd2 ); // xx if(map.elec_used[5]) elecData[b][i][5] -= ( 3*q ) * ( DX*DY*invd5 ); // xy if(map.elec_used[6]) elecData[b][i][6] -= ( 3*q ) * ( DX*DZ*invd5 ); // xz if(map.elec_used[7]) elecData[b][i][7] += ( q*invd3 ) * ( 1 - 3*DY*DY*invd2 ); // yy if(map.elec_used[8]) elecData[b][i][8] -= ( 3*q ) * ( DY*DZ*invd5 ); // yz if(map.elec_used[9]) elecData[b][i][9] += ( q*invd3 ) * ( 1 - 3*DZ*DZ*invd2 ); // zz //if(b==2) if(i==0) printf("Bond %d, atom %d: field %6.10f\n", b, at, elecData[b][i][1]); //if(b==55) if(i==0) printf("Bond %d, atom %d: charge %6.10f\n", b, at, q); //if(b==2) if(i==0) printf("Bond %d, atom %d: distance %6.10f\n", b, at, d); } else if( d==0.0 ) { printf("Division by zero when calculating electrostatics between atom %d (bond %d) and site %d.\n", at, b+1, i); error = 0; } } } } } return 1; } int get_freq(t_amide_map map, t_protbond *p_pb, int bonds, real ***elecData, int nelec, real **angleData, int nangles, real **freqData, int nfreq) { int b,i,j; real wel, wnn; for(b=0; b<bonds; b++) { wel = 0.0; wnn = 0.0; if(map.dimNNFSN[0]*map.dimNNFSN[1]>0) wnn += get_NN_val(angleData[b][0], angleData[b][1], map.NNFSN, map.dimNNFSN[0], map.dimNNFSN[1]); if(map.dimNNFSC[0]*map.dimNNFSC[1]>0) wnn += get_NN_val(angleData[b][2], angleData[b][3], map.NNFSC, map.dimNNFSC[0], map.dimNNFSC[1]); for(i=0; i<map.nsites; i++) { for(j=0; j<nelec; j++) { if(map.elec_used[j]) wel += elecData[b][i][j]*map.MapSites[i].shift[j]; } } freqData[b][0] = wel + wnn + map.freq; if(p_pb[b].isPro) { freqData[b][0] += map.proshift; } freqData[b][1] = wel; freqData[b][2] = wnn; //printf("Bond %d: %6.10f\n", b, elecData[b][0][1]); //for(i=0; i<3; i++) if(b==2) printf("%6.10f\n", elecData[b][0][i+1]); } return 1; } int get_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, real ***elecData, int nelec, matrix *RotMat, rvec *dipData, rvec *centData) { int b,i,j; rvec CN, CO; rvec mu; real fCO = 0.0665; // Remember pdb units are nm, not Angstrom real fCN = 0.0258; for(b=0; b<bonds; b++) { pbc_dx(&pbc, x[p_pb[b].N], x[p_pb[b].C], CN); unitv(CN, CN); svmul(fCN, CN, CN); pbc_dx(&pbc, x[p_pb[b].O], x[p_pb[b].C], CO); unitv(CO, CO); svmul(fCO, CO, CO); copy_rvec(x[p_pb[b].C], centData[b]); // The C atom is our reference point. rvec_inc(centData[b], CO); // Augment position with CO and CN vectors. rvec_inc(centData[b], CN); copy_rvec(map.dip, mu); for(i=0; i<map.nsites; i++) { for(j=0; j<nelec; j++) { if(map.elec_used[j]) { mu[0] += elecData[b][i][j]*map.MapSites[i].dipx[j]; mu[1] += elecData[b][i][j]*map.MapSites[i].dipy[j]; mu[2] += elecData[b][i][j]*map.MapSites[i].dipz[j]; } } } mvmul(RotMat[b], mu, dipData[b]); } return 1; } int get_pdc(t_pbc pbc, t_protbond *p_pb, int bonds, rvec *dipData, rvec *centData, real ***coupData, int ncoup) { int b1, b2; const real A = 0.1*(848619.0 / 1650.0) / (0.1011*0.1011); // Dipole derivatives can be converted to transition dipole moments using the factor // \sqrt( \hbar / 2*w_i ) = 4.1189e-25 m kg^{1/2} = 0.1011 Angstrom * amu^{1/2} // for a 1650 cm-1 vibration. A dipole derivative of 2.73 D/(A*amu^1/2) thus corresponds to // a dipole moment matrix element of 2.73*0.1011 = 0.276 D. // // The code below assumes that that the transition dipole moment is in units of Debye. 
real d, invd; rvec mu1, mu2; rvec r12; for(b1=0; b1<bonds; b1++) { coupData[b1][b1][0] = 0.0; copy_rvec(dipData[b1], mu1); for(b2=0; b2<b1; b2++) { copy_rvec(dipData[b2], mu2); pbc_dx(&pbc, centData[b2], centData[b1], r12); d = 10.0*norm(r12); invd = 1.0/d; unitv(r12, r12); coupData[b1][b2][0] = A*invd*invd*invd*( iprod(mu1,mu2) - 3.0*( iprod(mu1, r12)*iprod(mu2,r12) ) ); coupData[b2][b1][0] = coupData[b1][b2][0]; } } return 0; } int get_pdc_skinner(t_pbc pbc, t_protbond *p_pb, int bonds, rvec *dipData, rvec *centData, real ***coupData, int ncoup) { int b1, b2; const real A = 0.1*(848619.0 / 1650.0) * (2.73*2.73); real d, invd; rvec mu1, mu2; rvec r12; for(b1=0; b1<bonds; b1++) { coupData[b1][b1][0] = 0.0; copy_rvec(dipData[b1], mu1); unitv(mu1, mu1); for(b2=0; b2<b1; b2++) { copy_rvec(dipData[b2], mu2); unitv(mu2, mu2); pbc_dx(&pbc, centData[b2], centData[b1], r12); d = 10.0*norm(r12); invd = 1.0/d; unitv(r12, r12); coupData[b1][b2][0] = A*invd*invd*invd*( iprod(mu1,mu2) - 3.0*( iprod(mu1, r12)*iprod(mu2,r12) ) ); coupData[b2][b1][0] = coupData[b1][b2][0]; } } return 0; } /* Electrostatic coupling models such as TCC seek to evaluate matrix elements of the form <i| H_C |j> where H_C is the Coulomb interaction energy between the various atoms of the two coupled groups. To develop the TCC model, we expand in normal mode displacements from Q_i = Q_j = 0. By assumption, this starting point corresponds to the energy minimum, so that all first derivatives are zero. Second derivatives dH/dQ_i^2 and dH/DQ_j^2 corresponds to site energy shifts and are assumed to be accounted for by our electrostatic site energy maps. Mixed second derivatives d^2H/(dQ_i dQ_j), however, are expected to be non-zero and will form the basis for our TCC model. From this starting point, we need to evaluate coupling elements of the form <i| Q_i Q_j * (d^2 H_C/ dQ_i dQ_j) |j> = (d^2 H_C/ dQ_i dQ_j) <i| Q_i Q_j |j> = (d^2 H_C/ dQ_i dQ_j) \hbar/(2*sqrt{w_i w_j}) where the derivative term is evaluated at Q_i = Q_j = 0 and w_i and w_j are the frequencies of the i and j normal modes. We next neglect what should be a full integral over the ground state charge density as a function of adiabatic displacement along the normal mode coordinates and assume instead a simple sum of Coulomb interaction terms where charges are assigned to each site within the amide unit as a function of normal mode displacement: H_C = (1 / (4*\pi*\eps_0) ) * \sum_{m,n} ( q_n(Q_i) q_m(Q_j) ) / ( | r_n(Q_i) - r_m(Q_j) | ) where q_n(Q_i) and r_n(Q_i) are atomic charges and displacements of atom n as a function of the normal mode coordinate Q_i. The atomic displacements are by definition linear functions of the normal modes, i.e. r_n(Q_i) = r_n(0) + (dr_n/dQ_i) * Q_i. This linear relationship need not be true for the atomic charges, but for simplicity (and since we truncate at second order already), we make the same assumption for them: q_n(Q_i) = q_n(0) + (dq_n/dQ_i) * Q_i. The zero-displacement value q_n(0) should ideally be the same set of charges as used in electrostatic calculations for site energies, though discrepancies will not likely cause serious issues. 
Within this model, the mixed second derivatives are easily evaluated, with the result 4*\pi*\eps_0 * ( d^2 H_C / dQ_i dQ_j ) = \sum_{n,m} { + (dq_n*dq_m) / | r_{nm} | + ( q_n*q_m )*( dr_n \cdot dr_m) / | r_{nm} |^3 -3( q_n*q_m )*( dr_n \cdot r_{nm} )*( dr_m \cdot r_{nm} ) / | r_{nm} |^5 + [ ( dq_n*q_m )*( dr_m \cdot r_{nm} ) - ( q_n*dq_m )*( dr_n \cdot r_{nm} ) ] / | r_{nm} |^5 where for convenience we have denoted q_n(0) as q_n and (dq_n/dQ_i) as simply dq_n. The sums over n and m are to be taken over the atoms contributing to normal modes i and j, respectively. As input for the TCC calculation, we need a set of atomic charges q_n, charge fluxes dq_n, and atomic displacements dr_n (i.e. the displacement of each atom as a function of the normal mode coordinate). As a ratio of two quantities with units length and length*mass^{1/2}, the units on spatial displacements should be amu^{-1/2}. If U_{nx,j} is the unitary matrix required to diagonlize the Hessian matrix (in mass-weighted coordinates), these coefficients can be obtained as (dr_{nx}/dQ_j) = U_{nx,j}/sqrt{M_n} where r_{nx} is the x-coordinate of atom n, Q_j is the j^{th} normal mode and M_n is the mass of atom n. The charge flux should likewise be specified in units of e_o / ( Angstrom * amu^{1/2} ) since the derivative is taken along the (mass-weighted) normal mode coordinate. Note that the product of the dr_n and dq_n terms then has units of C / (Angstrom*amu). The suspicious mass unit is eliminated by the matrix element <i| Q_i Q_j |j> reported above. As an example, for a simple C=O oscillator, the correct mass-weighted normal mode coordinate is Q_j = ( sqrt(M_O)*r_{Ox} - sqrt(M_C)*r_{Cx} ) / sqrt(2) = ( 2.828 * r_{Ox} - 2.450 * r_{Cx} ) In this case, U_{Ox,j} = 1/sqrt{2}, while U_{Cx,j} = -1/sqrt{2}. The map input file should contain the entries COUPSITES: 2 C O NUX: -0.20412 0.17678 Q: 0.5 -0.5 The x-component of the dipole moment matrix element specified by these parameters is given by [mu_i]_x = (d[\mu_x]/dQ_i) = \sqrt( \hbar / (2*w_i) ) * \sum_n ( q_n(0)*dr_{nx} + dq_n r_{nx}(0) ) For a typical amide I frequency of w_i = 1650 cm-1, the prefactor evaluates to \sqrt( \hbar / 2*w_i ) = 4.1189e-25 m kg^{1/2} = 0.1011 Angstrom * amu^{1/2} while for the atomic charges and displacements specified above (letting dq_n = 0) the sum over n becomes Q_C*( 1/sqrt{2*M_C} ) + Q_O*( 1/sqrt{2*M_O} ) = 0.5 * ( -0.20412 ) - 0.5 * ( 0.17678 ) = -0.1905 e_o / amu^{1/2} giving together a dipole moment magnitude of 0.1011 * 0.1905 = 0.0193 e_o*Angstrom = 0.0193*(1.60217657e-19 C/e_o)*(1e-10 m/Angstrom) = (3.0922e-31 C m) / (3.33564e-30 C m/Debye) = 0.0927 Debye Note that compared to the experimental value of ~0.35 Debye, this model severely underestimates the Amide I oscillator strength (see Ackels et al. Vib. Spec. 50 (2009) 2–9). The discrepancy is due to the neglect of charge flow within the bond. During C=O dispacement the bonding characteristics of the amide group change, shifting from a C=O double bond resonance structure in which all atoms have formal charge of zero toward a C--O single bond structure in which the N obtains a positive charge and the O a negative one: (+) H-N H-N \ <--> \\ C=O C--O (-) To obtain a simple estimate for the atomic charges required to obtain the experimentally observed transition dipole moment strength, take as coordinates for a typical amide bond r_C = [0.000 0.000 0.000]; r_O = [1.230 -0.000 0.000]; r_N = [-0.65 1.170 0.000]; r_H = [-1.64 1.080 0.100]; in units of Angstrom. 
Let us suppose q_n(0) charges of 0.5, -0.5, -0.3, +0.3 for C, O, N, and H, respectively, and that the the normal mode motion involves only the C and O atoms. The contribution to the dipole moment from the static charges is then the same as calculated above, while the contribution from charge motion remains to be assigned. Assuming as a simplest model that charge flows only from N to O, we can characterize the charge flux with a single coefficient dq_N = -dq_Oj = dq > 0. The contribution to the x-component of the dipole moment then becomes \sqrt( \hbar / (2*w_i) ) * \sum_n ( q_n(0)*dr_{nx} + dq_n r_{nx}(0) ) = 0.1011 * dq * ( r_{Nx} - r_{Ox} ) = 0.1901 * {dq} e_o*Angstrom = 3.0457e-30 {dq} C*m = 0.9131 {dq} Debye (where {dq} represents the numerical value of the charge flux parameter without its units) and to the y-component 0.1011 * dq * ( r_{Ny} - r_{Oy} ) = 0.1183 * {dq} e_o*Angstrom = 1.8954e-30 {dq} C*m = 0.5682 {dq} Debye. The total oscillator strength is then (0.0927+0.9131 dq)^2 + (0.5682 dq)^2 . Setting equal to the experimental value of 0.12 Debye^2, we solve for dq to obtain 0.0086 + 0.1693 dq + 1.157 dq^2 = 0.12 1.157 dq^2 + 0.1693 dq - 0.12 = 0 --> dq = 0.257 e_o / ( Angstrom * amu^{1/2} ) A minimal (experimentally feasible) TCC model for amide I would thus consist of the mapfile entries COUPSITES: 4 C O N H NUX: -0.20412 0.17678 0.0 0.0 Q: 0.5 -0.5 -0.4 0.4 DQ: -0.257 0.0 0.257 0.0 To get the value of 1/(4*pi*Eps0) used below, start with Eps0 in standard units: Eps0 = 8.85418e-12 F/m In what follows, curly brackets {} represent numerical values (i.e. a quantity minus its units). Let F = Farads, m = meters, C = coloumbs, J = Joules, cm-1 = wavenumbers, Ang = angstrom, eo = units of elementary charge. We'll need the fundamental constants Eps0 = 8.8541878176e-12 F/m h = 6.62606957e-34 J s c = 2.99792458e10 cm / s E0 = 1.602176565e-19 C and the conversion relations {X} m = {X*1e10} Ang {X} F = {X} C^2/J {X} J = {X/hc} cm-1 {X} C = {X/EO} eo Applying these in order, we get {X} F/m = {X/1e10} F/Ang = {( X )/( 1e10 )} ( C^2 )/( Ang J ) = {( X h c )/( 1e10 )} ( C^2 )/( Ang cm-1 ) = {( X h c )/( EO^2 1e10 )} ( eo^2 )/( Ang cm-1 ) So we have 4*pi*Eps0 = {( 4*pi*(8.8541878176e-12)*(6.62606957e-34)*(2.99792458e10) )/( (1e10)*(1.602176565e-19)^2 )} ( eo^2 )/( Ang cm-1 ) Finally, we note that literature normal modes and charge fluxes are frequently reported in "unitless" coordinates, in which the mass-weighted normal mode coefficients dr/dQ have been multiplied by a factor with mass units. These values can be converted to standard units (for input in the map file) by (1) multiplying the reported normal mode coefficient by the square root of the atomic mass, (2) normalizing the resultant vector to have unit length (thus producing the corresponding column of the unitary transformation matrix which transforms mass-weighted coordinates into mass-weighted normal modes), and (3) dividing by the square root of the mass to produce dr/dQ. The charge fluxes must likewise be re-scaled by the same factor. For the Jansen TCC map parameters reported in [J. Chem. Phys. 
125, 044312 (2006)], the conversion can be done in Matlab by entering the commands: dq = [0.01668, -0.02845, -0.01530, 0.01736, 0.00008, 0.00963]'; nu = 0.028074*[0, 0, 0; -0.831, 0.105, 0.0; 0.517, -0.047, 0.0; 0.074, -0.036, 0.0; 0.073, -0.133, 0.0; 0.0, 0.0, 0.0]; NU = [nu(:,1); nu(:,2); nu(:,3)]; m = [12, 12, 16, 14, 2, 12]'; M = [m; m; m]; U = NU.*sqrt(M); U = U/norm(U); nu = (U./sqrt(M)); dq = dq*(nu(2)/NU(2)); The resultant values nu and dq should be dq = [0.165604207569485 -0.282460413989918 -0.151903140036758 0.172355458237785 0.000794264784505922 0.0956096234349003] and nu = [0.0000 -0.231622444056777 0.144102050032917 0.0206258253432028 0.0203470979737001 0.0 0.0 0.0292663737977878 -0.0131001863666288 -0.0100341853020987 -0.0370707401438645 0.0 0.0 0.0 0.0 0.0 0.0 0.0] */ int get_tcc(t_pbc pbc, t_amide_map map, rvec *x, t_protbond *p_pb, int bonds, matrix *RotMat, real ***coupData, int ncoup) { int i,j,n,m,k; rvec dx, ri, rj, rij, nu, nui, nuj; real d, invd, invd3, invd5, J; real qn, qm, dqn, dqm; real ONEOVER4PIEPS0 = 1.161409733881537e+05; // units of ( cm-1 * Ang )/( eo^2 ) // For coupling purposes, we take wi = wj = 1650 cm-1. real prefac = 0.0102168287; // \hbar/(2*sqrt{w_i w_j}) in Angstrom^2 * amu for(i=0; i<bonds; i++) { for(j=0; j<i; j++) { J = 0.0; int divbyzero = 0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Dipole moment components in e_o * Angstrom for(m=0; m<map.ncoupsites; m++) { nu[0] = map.CoupSites[m].nux; nu[1] = map.CoupSites[m].nuy; nu[2] = map.CoupSites[m].nuz; mvmul(RotMat[j], nu, nuj); copy_rvec(x[p_pb[j].CoupAtoms[m][0]], rj); for(k=0; k<p_pb[j].ncoupatoms[m]; k++) { pbc_dx(&pbc, x[p_pb[j].CoupAtoms[m][k]], x[p_pb[j].CoupAtoms[m][0]], dx); svmul((1.0)/p_pb[j].ncoupatoms[m], dx, dx); rvec_inc(rj, dx); } pbc_dx(&pbc, ri, rj, rij); // points towards i bond svmul(10.0, rij, rij); d = norm(rij); if(d==0) { //printf("Warning: Division by zero when calculating transition charge coupling constant for bonds %d and %d.\n", i, j); //printf("Setting coupling constant to zero...\n"); divbyzero = 1; break; } invd = (1.0)/d; invd3 = invd*invd*invd; invd5 = invd3*invd*invd; qn = map.CoupSites[n].q; qm = map.CoupSites[m].q; dqn = map.CoupSites[n].dq; dqm = map.CoupSites[m].dq; J += prefac*ONEOVER4PIEPS0*(dqn*dqm)*invd; J -= prefac*ONEOVER4PIEPS0*(3*qn*qm)*iprod(nui,rij)*iprod(nuj,rij)*invd5; J -= prefac*ONEOVER4PIEPS0*( -dqn*qm*iprod(nuj,rij) + qn*dqm*iprod(nui,rij) - qn*qm*iprod(nui,nuj) )*invd3; } } if(divbyzero) J = 0.0; coupData[i][j][0] = J; coupData[j][i][0] = J; //if((i==2) && (j==0)) printf("TCC (%d, %d): %6.10f\n", i, j, J); } } return 0; } int get_tc_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat, rvec *dipData, rvec *centData) { int i,n,k; rvec dx, ri, nu, nui; rvec dip, ref; // real pi = 3.14159265359; // real c = 2.99792458e+10; // cm/s // real hbar = (6.62606957e-34) / (2.0 * pi); // J*s // real wo = 1650.0*c*2.0*pi; // 1/s real prefac = 0.485498038384979; // sqrt( hbar / (2.0*wo) ) * ( 1.60217657e-19 ) / ( sqrt( 1.660538921e-27 ) * ( 3.33564e-30 ) ); // \sqrt( \hbar / 2*w_i ) * ( 1.60217657e-19 C/eo ) / ( sqrt( 1.660538921e-27 amu/kg) * ( 3.33564e-30 C*m/Debye ) ) for(i=0; i<bonds; i++) { // 
We use CoupAtoms[0][0] as our spatial reference point. // All coordinates MUST be updated relative to this point; // otherwise PBC may give errors. copy_rvec(x[p_pb[i].CoupAtoms[0][0]], ref); for(k=0; k<3; k++) dip[k] = 0.0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Now treat PBC: Take pbc_dx of ri from ref to get correct pbc-treated spatial displacement. pbc_dx(&pbc, ri, ref, dx); // Then add (without pbc-correction) this displacement to ref to get the pbc-corrected ri. // This treatment assures that the relative positions of all coupling sites are consistent // with one another within a single pbc image. for(k=0; k<3; k++) ri[k] = ref[k] + dx[k]; // Dipole moment components in e_o * Angstrom for(k=0; k<3; k++) dip[k] += (map.CoupSites[n].q*nui[k] + 10.0*map.CoupSites[n].dq*ri[k]); } for(k=0; k<3; k++) dipData[i][k] = prefac*dip[k]; } return 0; } real get_avg_tc_dip(t_pbc pbc, rvec *x, t_amide_map map, t_protbond *p_pb, int bonds, matrix *RotMat) { int i,n,k; rvec dx, ri, nu, nui; rvec dip; real prefac = 0.485498038384979; // sqrt( hbar / (2.0*wo) ) * ( 1.60217657e-19 ) / ( sqrt( 1.660538921e-27 ) * ( 3.33564e-30 ) ); // \sqrt( \hbar / 2*w_i ) * ( 1.60217657e-19 C/eo ) / ( sqrt( 1.660538921e-27 amu/kg) * ( 3.33564e-30 C*m/Debye ) ) real avg = 0.0; rvec ref; for(i=0; i<bonds; i++) { // We use CoupAtoms[0][0] as our spatial reference point. // All coordinates MUST be updated relative to this point; // otherwise PBC may give errors. copy_rvec(x[p_pb[i].CoupAtoms[0][0]], ref); for(k=0; k<3; k++) dip[k] = 0.0; for(n=0; n<map.ncoupsites; n++) { nu[0] = map.CoupSites[n].nux; nu[1] = map.CoupSites[n].nuy; nu[2] = map.CoupSites[n].nuz; mvmul(RotMat[i], nu, nui); copy_rvec(x[p_pb[i].CoupAtoms[n][0]], ri); for(k=1; k<p_pb[i].ncoupatoms[n]; k++) { pbc_dx(&pbc, x[p_pb[i].CoupAtoms[n][k]], x[p_pb[i].CoupAtoms[n][0]], dx); svmul((1.0)/p_pb[i].ncoupatoms[n], dx, dx); rvec_inc(ri, dx); } // Now treat PBC: Take pbc_dx of ri from ref to get correct pbc-treated spatial displacement. pbc_dx(&pbc, ri, ref, dx); // Then add (without pbc-correction) this displacement to ref to get the pbc-corrected ri. // This treatment assures that the relative positions of all coupling sites are consistent // with one another within a single pbc image. for(k=0; k<3; k++) ri[k] = ref[k] + dx[k]; // Dipole moment components in e_o * Angstrom for(k=0; k<3; k++) dip[k] += (map.CoupSites[n].q*nui[k] + 10.0*map.CoupSites[n].dq*ri[k]); } real val = 0.0; for(k=0; k<3; k++) val += prefac*prefac*dip[k]*dip[k]; avg += sqrt(val)/( (real) bonds ); } return avg; } int get_tcc_jansen(t_pbc pbc, t_amide_map map, rvec *x, t_protbond *p_pb, int bonds, matrix *RotMat, real ***coupData, int ncoup) { /* To get the value of 1/(4*pi*Eps0) used below, start with Eps0 in standard units: Eps0 = 8.85418e-12 F/m In what follows, curly brackets {} represent numerical values (i.e. a quantity minus its units). Let F = Farads, m = meters, C = coloumbs, J = Joules, cm-1 = wavenumbers, Ang = angstrom, eo = units of elementary charge. 
We'll need the fundamental constants Eps0 = 8.8541878176e-12 F/m h = 6.62606957e-34 J s c = 2.99792458e10 cm / s E0 = 1.602176565e-19 C and the conversion relations {X} m = {X*1e10} Ang {X} F = {X} C^2/J {X} J = {X/hc} cm-1 {X} C = {X/EO} eo Applying these in order, we get {X} F/m = {X/1e10} F/Ang = {( X )/( 1e10 )} ( C^2 )/( Ang J ) = {( X h c )/( 1e10 )} ( C^2 )/( Ang cm-1 ) = {( X h c )/( EO^2 1e10 )} ( eo^2 )/( Ang cm-1 ) So we have 4*pi*Eps0 = {( 4*pi*(8.8541878176e-12)*(6.62606957e-34)*(2.99792458e10) )/( (1e10)*(1.602176565e-19)^2 )} ( eo^2 )/( Ang cm-1 ) */ int ndxi[6], ndxj[6]; int i,j,n,m; real ONEOVER4PIEPS0 = 1.161409733881537e+05; // units of ( cm-1 * Ang )/( eo^2 ) rvec rij, nui, nuj; real d, invd, invd3, invd5, J; real amp = 0.028074; // units of Angstrom/( normal mode amplitude ) // Vector nu is the normal mode coordinate in units of the normal mode amplitude // just defined. nu*amp has units of Angstrom. rvec nu[6] = { { 0.0, 0.0, 0.0}, {-0.831, 0.105, 0.0}, {0.517, -0.047, 0.0}, {0.074, -0.036, 0.0}, {0.073, -0.133, 0.0}, {0.0, 0.0, 0.0} }; // units of elementary charge real q[6] = {0.11072, 0.37173, -0.53632, -0.48418, 0.24278, 0.29527}; real dq[6] = {0.01668, -0.02845, -0.01530, 0.01736, 0.00008, 0.00963}; for(i=0; i<bonds; i++) { ndxi[0] = p_pb[i].CAN; ndxi[1] = p_pb[i].C; ndxi[2] = p_pb[i].O; ndxi[3] = p_pb[i].N; ndxi[4] = p_pb[i].H; ndxi[5] = p_pb[i].CAC; for(j=0; j<i; j++) { // Make sure they're not nearest neighbors--otherwise, get division by zero if( (p_pb[i].CAN!=p_pb[j].CAC) && (p_pb[j].CAN!=p_pb[i].CAC) ) { ndxj[0] = p_pb[j].CAN; ndxj[1] = p_pb[j].C; ndxj[2] = p_pb[j].O; ndxj[3] = p_pb[j].N; ndxj[4] = p_pb[j].H; ndxj[5] = p_pb[j].CAC; J = 0.0; for(n=0; n<6; n++) { mvmul(RotMat[i], nu[n], nui); for(m=0; m<6; m++) { mvmul(RotMat[j], nu[m], nuj); pbc_dx(&pbc, x[ndxi[n]], x[ndxj[m]], rij); // points towards i bond svmul(10.0, rij, rij); d = norm(rij); invd = (1.0)/d; invd3 = invd*invd*invd; invd5 = invd3*invd*invd; J += ONEOVER4PIEPS0*(dq[n]*dq[m])*invd; J -= ONEOVER4PIEPS0*amp*amp*(3*q[n]*q[m])*iprod(nui,rij)*iprod(nuj,rij)*invd5; J -= ONEOVER4PIEPS0*( -dq[n]*q[m]*amp*iprod(nuj,rij) + q[n]*dq[m]*amp*iprod(nui,rij) - q[n]*q[m]*amp*amp*iprod(nui,nuj) )*invd3; } } coupData[i][j][0] = J; coupData[j][i][0] = J; } coupData[i][i][0] = 0.0; } } return 0; } int get_nnc(t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, real ***coupData) { real Jnn; int b1, b2; int NN; for(b1=0; b1<bonds; b1++) { for(b2=0; b2<b1; b2++) { NN = 0; // Check if the N-terminal CA of b1 is the C-terminal CA of b2 // Otherwise, check if the N-terminal CA of b2 is the C-terminal CA of b1 if( ((p_pb[b1].dih_set_N*p_pb[b2].dih_set_C)==1) && (p_pb[b1].psiN[1]==p_pb[b2].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][0], angleData[b1][1], map.NNC, map.dimNNC[0], map.dimNNC[1]); NN = 1; } else if( ((p_pb[b1].dih_set_C*p_pb[b2].dih_set_N)==1) && (p_pb[b2].psiN[1]==p_pb[b1].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][2], angleData[b1][3], map.NNC, map.dimNNC[0], map.dimNNC[1]); NN = 1; } if(NN) { coupData[b1][b2][0] = Jnn; coupData[b2][b1][0] = Jnn; coupData[b1][b2][2] = Jnn; coupData[b2][b1][2] = Jnn; } } } return 1; } int get_dnnc(t_amide_map map, t_protbond *p_pb, int bonds, real **angleData, real ***coupData) { real Jnn; int b1, b2; int NN; for(b1=0; b1<bonds; b1++) { for(b2=0; b2<b1; b2++) { NN = 0; // Check if the N-terminal CA of b1 is the C-terminal CA of b2 // Otherwise, check if the N-terminal CA of b2 is the C-terminal CA of b1 if( ((p_pb[b1].dih_set_N*p_pb[b2].dih_set_C)==1) && 
(p_pb[b1].psiN[1]==p_pb[b2].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][0], angleData[b1][1], map.DNNC, map.dimDNNC[0], map.dimDNNC[1]); NN = 1; } else if( ((p_pb[b1].dih_set_C*p_pb[b2].dih_set_N)==1) && (p_pb[b2].psiN[1]==p_pb[b1].psiC[1]) ) { Jnn = get_NN_val(angleData[b1][2], angleData[b1][3], map.DNNC, map.dimDNNC[0], map.dimDNNC[1]); NN = 1; } if(NN) { coupData[b1][b2][0] += Jnn; coupData[b2][b1][0] += Jnn; coupData[b1][b2][2] += Jnn; coupData[b2][b1][2] += Jnn; } } } return 1; } int open_info_file(FILE **p_fp, char* namebase, const int maxchar) { char infonm[maxchar]; if( (namebase!=NULL && (strlen(namebase)>0) )) { strcpy(infonm, namebase); if(namebase[strlen(namebase)-1]!='/') { strcat(infonm, "_"); } } else { infonm[0] = '\0'; } strcat(infonm, "info.txt"); *p_fp = fopen(infonm, "w"); if(*p_fp==NULL) { printf("Error opening output file for writing.\n"); return 0; } else return 1; } int open_spec_files(FILE **p_fp, int nfiles, char* namebase, const int maxchar) { int i; char sitenm[maxchar]; char hamnm[maxchar]; char dipxnm[maxchar]; char dipynm[maxchar]; char dipznm[maxchar]; if( (namebase!=NULL && (strlen(namebase)>0) )) { strcpy(sitenm, namebase); strcpy(hamnm, namebase); strcpy(dipxnm, namebase); strcpy(dipynm, namebase); strcpy(dipznm, namebase); if(namebase[strlen(namebase)-1]!='/') { strcat(sitenm, "_"); strcat(hamnm, "_"); strcat(dipxnm, "_"); strcat(dipynm, "_"); strcat(dipznm, "_"); } } else { sitenm[0] = '\0'; hamnm[0] = '\0'; dipxnm[0] = '\0'; dipynm[0] = '\0'; dipznm[0] = '\0'; } strcat(sitenm, "sites.txt"); strcat(hamnm, "ham.txt"); strcat(dipxnm, "dipx.txt"); strcat(dipynm, "dipy.txt"); strcat(dipznm, "dipz.txt"); p_fp[0] = fopen(sitenm, "w"); p_fp[1] = fopen(hamnm, "w"); p_fp[2] = fopen(dipxnm, "w"); p_fp[3] = fopen(dipynm, "w"); p_fp[4] = fopen(dipznm, "w"); for(i=0; i<nfiles; i++) { if(p_fp[i]==NULL) { printf("Error opening output file for writing.\n"); return 0; } } return 1; } int write_spec_data(FILE **p_fp, real ***coupData, real **freqData, rvec *dipData, int bonds) { int b1,b2; for(b1=0; b1<bonds; b1++) { fprintf(p_fp[0], "%10.6f\t", freqData[b1][0]); for(b2=0; b2<bonds; b2++) { if(b1==b2) fprintf(p_fp[1], "%10.6f\t", freqData[b1][0]); else fprintf(p_fp[1], "%10.6f\t", coupData[b1][b2][0]); } fprintf(p_fp[2], "%10.6f\t", dipData[b1][0]); fprintf(p_fp[3], "%10.6f\t", dipData[b1][1]); fprintf(p_fp[4], "%10.6f\t", dipData[b1][2]); } fprintf(p_fp[0], "\n"); fprintf(p_fp[1], "\n"); fprintf(p_fp[2], "\n"); fprintf(p_fp[3], "\n"); fprintf(p_fp[4], "\n"); return 1; } int close_spec_files(FILE **p_fp, int nfiles) { int i; for(i=0; i<nfiles; i++) fclose(p_fp[i]); return 1; } int open_elec_files(FILE **elecfp[10], char *outname, t_amide_map map, const int maxchar) { int i,j,k,l; char fname[maxchar]; char COMP[10][6] = { "P", "Ex", "Ey", "Ez", "Gxx", "Gxy", "Gxz", "Gyy", "Gyz", "Gzz" }; for(j=0; j<10; j++) { if(map.elec_used[j]) { elecfp[j] = (FILE**) malloc(map.nsites*sizeof(FILE*)); if(elecfp[j]==NULL) { printf("Error opening electrostatic output files. 
Please check input.\n"); int jx; for(jx=0; jx<j; jx++) free(elecfp[j]); return 0; } } else elecfp[j] = NULL; } for(j=0; j<10; j++) { for(i=0; i<map.nsites; i++) { if(map.elec_used[j]) { fname[0] = '\0'; if( (outname!=NULL) && (strlen(outname)>0) ) { strcpy(fname, outname); if(outname[strlen(outname)-1]!='/') strcat(fname, "_"); } strcat(fname, COMP[j]); for(k=0; k<map.MapSites[i].natoms; k++) { if(k>0) strcat(fname, "_AND"); for(l=0; l<map.MapSites[i].AtomPaths[k].length; l++) { strcat(fname, "_"); strcat(fname, map.MapSites[i].AtomPaths[k].Path[l]); } } strcat(fname, ".txt"); elecfp[j][i] = fopen(fname, "w"); if(elecfp[j][i]==NULL) { printf("Error opening electrostatic output files. Please check input.\n"); int ix, jx; for(ix=0; ix<i; ix++) fclose(elecfp[j][ix]); free(elecfp[j]); for(jx=0; jx<j; jx++) { for(ix=0; ix<map.nsites; ix++) fclose(elecfp[jx][ix]); free(elecfp[jx]); } return 0; } } } } return 1; } int open_angle_files(FILE *anglefp[10], char *outname, const int maxchar) { int i; char fname[maxchar]; char NAME[4][6] = {"phiN", "psiN", "phiC", "psiC"}; for(i=0; i<4; i++) { fname[0] = '\0'; if( (outname!=NULL) && (strlen(outname)>0) ) { strcpy(fname, outname); if(outname[strlen(outname)-1]!='/') strcat(fname, "_"); } strcat(fname, NAME[i]); strcat(fname, ".txt"); anglefp[i] = fopen(fname, "w"); if(anglefp[i]==NULL) { printf("Error opening angle output files. Please check input.\n"); int ix; for(ix=0; ix<i; ix++) fclose(anglefp[ix]); return 0; } } return 1; } int write_elec_dat(FILE **elecfp[10], t_amide_map map, real ***elecData, int bonds, int nelec) { int b,i,j; for(i=0; i<nelec; i++) { if(map.elec_used[i]) { for(j=0; j<map.nsites; j++) { for(b=0; b<bonds; b++) { fprintf(elecfp[i][j], "%6.10f\t", elecData[b][j][i]); } fprintf(elecfp[i][j], "\n"); } } } return 1; } int write_angle_dat(FILE *anglefp[4], real **angleData, int bonds, int nangles) { int b,i; for(i=0; i<nangles; i++) { for(b=0; b<bonds; b++) fprintf(anglefp[i], "%6.10f\t", angleData[b][i]); fprintf(anglefp[i], "\n"); } return 1; } int close_elec_files(FILE **elecfp[10], t_amide_map map) { int i,j; for(j=0; j<10; j++) { if(elecfp[j]!=NULL) { for(i=0; i<map.nsites; i++) if(elecfp[j][i]!=NULL) fclose(elecfp[j][i]); free(elecfp[j]); } } return 1; } int close_angle_files(FILE *anglefp[4]) { int i; for(i=0; i<4; i++) if(anglefp[i]!=NULL) fclose(anglefp[i]); return 1; } /******************************************************************************** * Main code. * ********************************************************************************/ int main ( int argc, char * argv[] ) { /************************************************************************ This section deals with command line arguments. Most of the action happens in the parse_common_args() command, which checks what command line arguments were provided and initializes the corresonding variables **************************************************************************/ // Variables set by command line arguments char* mapfile = NULL; char* promapfile = NULL; char* chargefile = NULL; char* outname = NULL; // cjfeng 08/27/2016 // Added chunk command to selectively choose portion of bath // to include in electrostatics. 
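	// Illustrative example (hypothetical values): with the format parsed
	// below, a request such as
	//     -chunk "[0-99;500-599]"
	// limits the electrostatic sums to atoms 0-99 and 500-599, zero-based
	// and inclusive at both ends.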
char* chunkstr = ""; int nthreads = 1; int print_elec = 0; int print_angles = 0; int nnc, nnfsn, nnfsc, dnnc; real cutoff = 1000.0; real osc = -1; int i; int verbose = 0; // A list of command line file flags t_filenm fnm[] = { { efTRX, "-f", NULL, ffREAD }, { efTPX, NULL, NULL, ffREAD } }; // A list of additional command line arguments t_pargs pa [] = { {"-mapfile", FALSE, etSTR, {&mapfile}, "Complete map file, specifying all parameters."}, {"-chargefile", FALSE, etSTR, {&chargefile}, "Complete charge file, specifying all charges."}, {"-promapfile", FALSE, etSTR, {&promapfile}, "Complete map file, specifying all parameters."}, {"-outname", FALSE, etSTR, {&outname}, "File name base for output"}, {"-nt", FALSE, etINT, {&nthreads}, "Number of threads"}, {"-print_elec", FALSE, etBOOL, {&print_elec}, "Print electrostatic values"}, {"-print_angles", FALSE, etBOOL, {&print_angles}, "Print dihedral angles"}, {"-cutoff", FALSE, etREAL, {&cutoff}, "Cutoff distance (nm)"}, {"-osc", FALSE, etREAL, {&osc}, "Oscillator strength (Debye^2). If positive, used to normalize dipole moments."}, {"-verbose", FALSE, etBOOL, {&verbose}, "Verbose (print lots of info)"}, {"-chunk", FALSE, etSTR, {&chunkstr}, "Chunk range for selecting part of the system to be included for electrostatic frequency shift, format: [a-b;...;c-d]. Indexing begins at zero."} }; // The program description const char *desc[] = {""}; // A description of known bugs const char *bugs[] = {""}; // Here we parse the command line arguments and make them available to call within the program output_env_t oenv; CopyRight(stderr, argv[0]); parse_common_args(&argc, argv, PCA_CAN_TIME | PCA_BE_NICE, asize(fnm), fnm, asize(pa), pa, asize(desc), desc, asize(bugs), bugs, &oenv); /********************************************************** * We first check if only part of the bath is included for * * computing electrostatic frequency shift. * * cjfeng 08/27/2016 * **********************************************************/ int START[1000]; // Starting index for computing electrostatics int STOP[1000]; // Ending index for computing electrostatics int nchunks = 0; // Number of chunks if( strlen(chunkstr)!=0 ) { int i, j; printf("Parsing chunk request %s...\n", chunkstr); if( (chunkstr[0]!='[') || (chunkstr[strlen(chunkstr)-1]!=']') ) { printf("Error parsing chunk string. Format: [a-b;c-d;...;e-f]\n"); return 0; } else { // Changed string processing code. MER 10/04/2014 char chunk[1000]; for(i=0; i<strlen(chunkstr); i++) { if( (chunkstr[i]==';') || (chunkstr[i]=='[') ) { for(j=i+1; j<strlen(chunkstr); j++) { if( (chunkstr[j]==';') || (chunkstr[j]==']') ) { strncpy(chunk, chunkstr+i+1, j-i-1); chunk[j-i-1] = '\0'; break; } } int start, stop; if( (j>=strlen(chunkstr)) || (sscanf(chunk, "%d-%d", &start, &stop)!=2) ) { printf("Error parsing input segment %s\n", chunk); return 0; } else { START[nchunks] = start; STOP[nchunks] = stop; nchunks++; } } } } } /************************************************************************ Now we check the input. First we check and set the mapping parameters, then the (first frame of the) provided structure file. **************************************************************************/ // First load map parameters t_amide_map map; t_amide_map promap; if( (mapfile==NULL) ) { printf("Please specify a map using the -mapfile flag.\n"); return 0; } // read_amide_map opens the provided map file and stores all data in the map structure if(!read_amide_map(mapfile, &map, verbose)) { printf("Error reading specified map file. 
Please check input.\n"); return 0; } printf("\nSuccessfully loaded all mapping parameters from input file %s. \n", mapfile); if((promapfile!=NULL)) { if(!read_amide_map(promapfile, &promap, verbose)) { printf("Error reading specified proline map file. Please check input.\n"); return 0; } printf("\nSuccessfully loaded all mapping parameters from proline map file %s. \n", promapfile); printf("\nWarning: Currently only site energy shifts are used from the proline map file. Coupling parameters (both through-space and through-bond) are taken from the default Amide I map.\n"); } else { promap.nsites = 0; } // Check whether nearest-neighbor coupling is used. if((map.dimNNC[0]*map.dimNNC[1])>0) nnc = 1; else nnc = 0; if((map.dimDNNC[0]*map.dimDNNC[1])>0) dnnc = 1; else dnnc = 0; // Check whether nearest-neighbor frequency shifts are used. if((map.dimNNFSN[0]*map.dimNNFSN[1])>0) nnfsn = 1; else nnfsn = 0; if((map.dimNNFSC[0]*map.dimNNFSC[1])>0) nnfsc = 1; else nnfsc = 0; if(verbose) { printf("Coupling model: %s\n", map.couptype); if(nnc) printf("Using nearest-neighbor coupling map.\n"); if(dnnc) printf("Using nearest-neighbor coupling difference map.\n"); if((nnc+dnnc)==0) printf("No nearest-neighbor coupling map in use.\n"); if(nnfsn) printf("Using N-terminal nearest-neighbor frequency-shift map.\n"); if(nnfsc) printf("Using C-terminal nearest-neighbor frequency-shift map.\n"); if((nnfsn+nnfsc)==0) printf("No nearest-neighbor frequency shifts in use.\n"); } if(map.dipset) { if(verbose) printf("Using dipole vector provided in map file\n"); } else if(map.tcset) { if(verbose) printf("Using TCC parameters provided in map file to set dipole moment.\n"); } else { printf("Error! No method specified for calculating dipole moments. \n"); printf("Please provide either a dipole moment (DIP:) entry or TCC parameters.\n"); free_amide_map(map); return 0; } // Read atomic charges. int ffcharge = 0; int nres; t_residue *resarray; int nrep; char **reparray[2]; if( (chargefile!=NULL) ) { ffcharge = 0; if(read_charge_map(chargefile, &resarray, &nres, reparray, &nrep, verbose)!=0) { printf("Error reading charge file. \n"); free_amide_map(map); free_amide_map(promap); return 0; } } else { // If no chargefile is specified, we use force-field charges. // We still need to manually enter a replacement name array since // some names (particularly HN for the CHARMM27 force field) will // often need to be swapped.
int error = 0; int j; ffcharge = 1; int nchars = 20; nrep = 8; nres = 0; resarray = NULL; reparray[0] = NULL; reparray[1] = NULL; reparray[0] = (char**) malloc(nrep*sizeof(char*)); reparray[1] = (char**) malloc(nrep*sizeof(char*)); if(reparray[0]==NULL) error = 1; if(reparray[1]==NULL) error = 1; for(i=0; i<nrep; i++) { if(!error) { reparray[0][i] = (char*) malloc(nchars*sizeof(char)); if(reparray[0][i]==NULL) { for(j=0; j<i; j++) free(reparray[0][j]); for(j=0; j<i; j++) free(reparray[1][j]); error = 2; } } else if(reparray[0]!=NULL) reparray[0][i] = NULL; if(!error) { reparray[1][i] = (char*) malloc(nchars*sizeof(char)); if(reparray[1][i]==NULL) { for(j=0; j<=i; j++) free(reparray[0][j]); for(j=0; j<i; j++) free(reparray[1][j]); error = 2; } } else if(reparray[1]!=NULL) reparray[1][i] = NULL; } if(!error) { char *DEFREP1[] = { "HN", "O1", "O2", "OT1", "OT2", "HT2", "HT1", "HT2" }; char *DEFREP2[] = { "H", "OT", "O", "OT", "O", "HO", "H1", "H2"}; for(i=0; i<nrep; i++) { strcpy(reparray[0][i], DEFREP1[i]); strcpy(reparray[1][i], DEFREP2[i]); } } if(error) { if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_amide_map(map); free_amide_map(promap); return 0; } } // Now structure files. First topology, then first frame of trajectory t_topology top; t_inputrec ir; t_trxstatus *status; int trrStatus = 1; real t; rvec *x; matrix box; int natoms; int ePBC; t_pbc pbc; // read_tpx_top() reads the topology file and stores the values in the t_topology type top. The t_inputrec type ir stores information // on the box and periodic boundary conditions. The matrix box contains the vectors defining the periodic box. ePBC = read_tpx_top(ftp2fn(efTPX,asize(fnm),fnm), &ir, box, &natoms, NULL, NULL, NULL, &top); // set_pbc initializes a t_pbc type variable pbc to be used later on in calls to, e.g. pbc_dx for calculating pbc-corrected distances. set_pbc(&pbc, ePBC, box); // read_first_x opens a trajectory file, allocates memory for one frame of coordinates (the rvec array x), // and reads the first frame from the trajectory. natoms = read_first_x(oenv, &status, ftp2fn(efTRX,asize(fnm),fnm), &t, &x, box); // Check to see if the number of atoms in topology and trajectory files match. if ( natoms > top.atoms.nr ) gmx_fatal(FARGS,"Topology (%d atoms) does not match trajectory (%d atoms)", top.atoms.nr, natoms); /************************************************************************ Now parse the protein structure to assign charges and identify amide bonds. **************************************************************************/ int error = 0; t_protbond *p_pb = NULL; // Protein bond array int b; // b is a counter, bonds is the number of bonds. int bonds = 0; t_protbond *p_ppb = NULL; // Proline protein bond array int nPro = 0; // First, make any atom name replacements necessary. if(!error) swap_atomnames(top, reparray, nrep, verbose); // Now reassign atomic charges. if( (!error) && (!ffcharge) ) if(identify_residues(&pbc, top, x, resarray, nres, verbose)!=0) error = 1; // And look for bonds.
if(!error) { bonds = find_bonds(&pbc, top, x, &p_pb); printf("\nLocated %d amide bonds in the peptide chain\n", bonds); if(bonds<1) { printf("Error: No amide bonds located.\n"); error = 1; } else { for(b=0; b<bonds; b++) { if(p_pb[b].isPro) nPro++; } } printf("\nLocated %d proline bonds in the peptide chain\n", nPro); if( (!error) && (promap.nsites!=0) ) { p_ppb = (t_protbond*) malloc( sizeof(t_protbond)*nPro ); if(p_ppb==NULL) { printf("Error allocating memory for proline bonds.\n"); error = 1; } else { int count = 0; for(b=0; b<bonds; b++) { if(p_pb[b].isPro) { initialize_bond(&pbc, top, x, &p_ppb[count], p_pb[b].N, p_pb[b].H, p_pb[b].C, p_pb[b].O, p_pb[b].isPro); count++; } } } } } // Next identify mapping sites for each bond if(!error) if(!find_map_sites(pbc, top, x, map, p_pb, bonds)) error = 1; if((!error) && (promap.nsites!=0)) if(!find_map_sites(pbc, top, x, promap, p_ppb, nPro)) error = 1; // And identify coupling sites for each bond // For now, coupling sites are taken ONLY from the main map file. // Coupling sites in the proline map file are ignored. if(!error) if(!find_coup_sites(pbc, top, x, map, p_pb, bonds)) error = 1; if( (!error) && (promap.nsites!=0) ) if(!find_coup_sites(pbc, top, x, map, p_ppb, nPro)) error = 1; // And identify excluded atoms for electrostatic calculation if(!error) if(!find_excluded_atoms(pbc, top, x, map, p_pb, bonds)) error = 1; if((!error) && (promap.nsites!=0)) if(!find_excluded_atoms(pbc, top, x, promap, p_ppb, nPro)) error = 1; // If anything went wrong, surrender with dignity. if(error) { for(b=0; b<bonds; b++) free_bond(p_pb[b]); if(p_pb!=NULL) free(p_pb); if(p_ppb!=NULL) free(p_ppb); if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]); if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]); if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_resarray(resarray, nres); free_amide_map(map); free_amide_map(promap); return 0; } // Tell the user what we found. if(verbose) for(b=0; b<bonds; b++) print_bond(p_pb[b], top); if( verbose && (promap.nsites!=0) ) for(b=0; b<nPro; b++) print_bond(p_ppb[b], top); /************************************************************************ Allocate memory for data arrays and open output files. 
**************************************************************************/ int arraysset = 0; int specfilesopen = 0; int elecfilesopen = 0; int anglefilesopen = 0; real ***elecData; real ***ProElecData; const int nelec = 10; real **angleData; real **ProAngleData; const int nangles = 4; real **freqData; real **ProFreqData; const int nfreq = 3; real ***coupData; const int ncoup = 3; rvec **coordData; rvec **ProCoordData; rvec *dipData = NULL; rvec *centData = NULL; matrix *RotMat = NULL; matrix *ProRotMat = NULL; if(!set_arrays(&elecData, &ProElecData, nelec, &angleData, &ProAngleData, nangles, &freqData, &ProFreqData, nfreq, &coupData, ncoup, &coordData, &ProCoordData, &dipData, &centData, &RotMat, &ProRotMat, bonds, map.nsites, nPro, promap.nsites)) { printf("Error allocating memory for data arrays\n"); error = 1; } else arraysset = 1; const int maxchar = 1024; FILE *infofp = NULL; int infofileopen = 0; if(!open_info_file(&infofp,outname,maxchar)) error = 1; else infofileopen = 1; FILE *specfp[5]; if(!open_spec_files(specfp,5,outname,maxchar)) error = 1; else specfilesopen = 1; FILE **elecfp[10]; if( (print_elec) && (!open_elec_files(elecfp, outname, map, maxchar))) error = 1; else if(print_elec) elecfilesopen = 1; FILE *anglefp[4]; if( (print_angles) && (!open_angle_files(anglefp, outname, maxchar))) error = 1; else if(print_angles) anglefilesopen = 1; if(!error) { fprintf(infofp, "BONDS: %d\n", bonds); for(b=0; b<bonds; b++) fprintf(infofp, "%s %d %s %d\n", p_pb[b].resname1, p_pb[b].resnum1, p_pb[b].resname2, p_pb[b].resnum2); } if( (strcmp(map.couptype, "pdc")!=0) && (strcmp(map.couptype, "tcc")!=0) ) { printf("Error: Please specify a coupling model (pdc or tcc).\n"); error = 1; } if(error) { if(anglefilesopen==1) close_angle_files(anglefp); if(elecfilesopen==1) close_elec_files(elecfp, map); if(infofileopen==1) fclose(infofp); if(specfilesopen==1) close_spec_files(specfp, 5); if(arraysset==1) unset_arrays(elecData, ProElecData, nelec, angleData, ProAngleData, nangles, freqData, ProFreqData, nfreq, coupData, ncoup, coordData, ProCoordData, dipData, centData, RotMat, ProRotMat, bonds, map.nsites, nPro, promap.nsites); if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]); if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]); if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_resarray(resarray, nres); free_amide_map(map); free_amide_map(promap); for(b=0; b<bonds; b++) free_bond(p_pb[b]); if(p_pb!=NULL) free(p_pb); if(p_ppb!=NULL) free(p_ppb); return 0; } /************************************************************************ Step through trajectory and calculate parameters. **************************************************************************/ real dipstrength = 0.0; do { // Read coordinates for all amide bonds and set molecular frame axes/rotation matrix. get_coordinates(pbc, top, x, map, p_pb, bonds, RotMat, coordData); if(promap.nsites!=0) get_coordinates(pbc, top, x, promap, p_ppb, nPro, ProRotMat, ProCoordData); // Calculate electrostatic values around each bond. get_electrostatics(pbc, top, x, map, p_pb, RotMat, bonds, coordData, elecData, nelec, nthreads, cutoff, nchunks, START, STOP); if(promap.nsites!=0) get_electrostatics(pbc, top, x, promap, p_ppb, ProRotMat, nPro, ProCoordData, ProElecData, nelec, nthreads, cutoff, nchunks, START, STOP); // Calculate dihedral angles for each bond. 
get_angles(&pbc, top, x, map, p_pb, bonds, angleData, nangles); if(promap.nsites!=0) get_angles(&pbc, top, x, promap, p_ppb, nPro, ProAngleData, nangles); // Calculate frequencies. get_freq(map, p_pb, bonds, elecData, nelec, angleData, nangles, freqData, nfreq); if(promap.nsites!=0) get_freq(promap, p_ppb, nPro, ProElecData, nelec, ProAngleData, nangles, ProFreqData, nfreq); // Graft in proline frequencies if(promap.nsites!=0) { for(b=0; b<bonds; b++) { if(p_pb[b].isPro) { freqData[b][0] = ProFreqData[p_pb[b].isPro-1][0]; freqData[b][1] = ProFreqData[p_pb[b].isPro-1][1]; } } } if( (osc>0) && (map.dipset) && (dipstrength==0.0) ) { dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]); printf("Average dipole strength: %6.4f Debye\n", dipstrength); printf("Re-scaling map dipole moment.\n"); for(i=0; i<3; i++) map.dip[i] *= (sqrt(osc)/dipstrength); dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]); printf("Re-scaled average dipole strength: %6.4f Debye\n", dipstrength); } else if( (osc<0) && (map.dipset) && (dipstrength==0.0) ) { real defosc = 0.276*0.276; dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]); for(i=0; i<3; i++) map.dip[i] *= (sqrt(defosc)/dipstrength); dipstrength = sqrt(map.dip[0]*map.dip[0] + map.dip[1]*map.dip[1] + map.dip[2]*map.dip[2]); printf("Assigning average dipole strength: %6.4f Debye\n", dipstrength); } else if( (osc<0) && (!map.dipset) && (dipstrength==0.0) ) { dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat); printf("Using TCC-defined dipole strength: %6.4f Debye\n", dipstrength); } else if( (osc>0) && (map.tcset) && (dipstrength==0.0) ) { dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat); printf("Input dipole strength: %6.4f Debye\n", dipstrength); printf("Re-scaling map charges and charge fluxes.\n"); for(i=0; i<map.ncoupsites; i++) map.CoupSites[i].q *= (sqrt(osc)/dipstrength); for(i=0; i<map.ncoupsites; i++) map.CoupSites[i].dq *= (sqrt(osc)/dipstrength); dipstrength = get_avg_tc_dip(pbc, x, map, p_pb, bonds, RotMat); printf("Re-scaled dipole strength: %6.4f Debye\n", dipstrength); } // If a dipole moment was specified, use it. if(map.dipset) get_dip(pbc, x, map, p_pb, bonds, elecData, nelec, RotMat, dipData, centData); // Otherwise, try to calculate from TCC parameters. else if(map.tcset) get_tc_dip(pbc, x, map, p_pb, bonds, RotMat, dipData, centData); // If PDC was specified, calculate PDC couplings. if(!strcmp(map.couptype, "pdc")) if(get_pdc(pbc, p_pb, bonds, dipData, centData, coupData, ncoup)) error = 1; // If TCC was specified, calculate TCC couplings. if(!strcmp(map.couptype, "tcc")) if(get_tcc(pbc, map, x, p_pb, bonds, RotMat, coupData, ncoup)) error = 1; //if(!strcmp(map.couptype, "tcc")) if(get_tcc_jansen(pbc, map, x, p_pb, bonds, RotMat, coupData, ncoup)) error = 1; // If using NNC map, get nearest-neighbor couplings. if(nnc) get_nnc(map, p_pb, bonds, angleData, coupData); if(dnnc) get_dnnc(map, p_pb, bonds, angleData, coupData); // Write data to files. write_spec_data(specfp, coupData, freqData, dipData, bonds); // If saving electrostatic data, write it to file now. if(print_elec) write_elec_dat(elecfp, map, elecData, bonds, nelec); // If saving angle data, write it to file now. if(print_angles) write_angle_dat(anglefp, angleData, bonds, nangles); // If we ran into an error, don't read any more frames. // Otherwise, continue on toward victory! 
if(error) trrStatus = 0; else trrStatus = read_next_x(oenv, status, &t, natoms, x, box); set_pbc(&pbc, ePBC, box); // Re-set pbc using new box } while(trrStatus); if(anglefilesopen==1) close_angle_files(anglefp); if(elecfilesopen==1) close_elec_files(elecfp, map); if(infofileopen==1) fclose(infofp); if(specfilesopen==1) close_spec_files(specfp, 5); if(arraysset==1) unset_arrays(elecData, ProElecData, nelec, angleData, ProAngleData, nangles, freqData, ProFreqData, nfreq, coupData, ncoup, coordData, ProCoordData, dipData, centData, RotMat, ProRotMat, bonds, map.nsites, nPro, promap.nsites); if(reparray[0]!=NULL) for(i=0; i<nrep; i++) if(reparray[0][i]!=NULL) free(reparray[0][i]); if(reparray[1]!=NULL) for(i=0; i<nrep; i++) if(reparray[1][i]!=NULL) free(reparray[1][i]); if(reparray[0]!=NULL) free(reparray[0]); if(reparray[1]!=NULL) free(reparray[1]); free_resarray(resarray, nres); free_amide_map(map); for(b=0; b<bonds; b++) free_bond(p_pb[b]); if(p_pb!=NULL) free(p_pb); return 0; }
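/*
 * Aside: a minimal, self-contained sketch of the same "[a-b;c-d]" chunk-string
 * parsing performed in main() above, using sscanf's %n length count instead of
 * manual index scanning. parse_chunks and MAXCHUNKS are hypothetical names used
 * only for illustration; the zero-indexed format and the 1000-range limit
 * mirror the program above.
 */
#include <stdio.h>
#include <string.h>

#define MAXCHUNKS 1000

/* Parse "[a-b;c-d;...]" into start[]/stop[]; returns the number of chunks,
 * or -1 if the string is malformed or holds more than MAXCHUNKS ranges. */
static int parse_chunks(const char *s, int *start, int *stop)
{
    size_t len = strlen(s);
    int n = 0;
    if( len<2 || s[0]!='[' || s[len-1]!=']' ) return -1;
    const char *p = s + 1;
    while( p < s+len-1 ) {
        int a, b, used;
        if( sscanf(p, "%d-%d%n", &a, &b, &used)!=2 ) return -1;
        if( n>=MAXCHUNKS ) return -1;
        start[n] = a;
        stop[n] = b;
        n++;
        p += used;
        if( *p==';' ) p++;             /* skip the separator between ranges */
        else if( *p!=']' ) return -1;  /* anything else is malformed */
    }
    return n;
}

int main(void)
{
    int start[MAXCHUNKS], stop[MAXCHUNKS], i;
    int n = parse_chunks("[0-10;20-30]", start, stop);
    for(i=0; i<n; i++) printf("chunk %d: %d-%d\n", i, start[i], stop[i]);
    return 0;
}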
cp_up.c
/* compute the potential energy of a collection of */ /* particles interacting via pairwise potential */ // compile with -lm // liyp 10.5 #include<stdio.h> #include<stdlib.h> #include<math.h> //#include<windows.h> #include<time.h> #include<omp.h> // liyp 10.05 //#define NPARTS 1000 #define NPARTS 10000 #define NITER 201 #define DIMS 3 int rand(void); double computePot(); void initPositions(void); void updatePositions(void); double r[DIMS][NPARTS]; double pot; double distx,disty,distz,dist; double **poot=NULL; int main(){ int i; double time = 0.0; poot = (double **)malloc(sizeof(double *)*NPARTS); for(i=0; i<NPARTS; i++) *(poot+i)=(double *)malloc(sizeof(double)*NPARTS); initPositions(); updatePositions(); for(i=0;i<NITER;i++) { pot = 0; time += computePot(); if(i%10 == 0) printf("%5d:Potential:%10.7f \n",i,pot); updatePositions(); } printf ( " Time = %g seconds.\n", time); for(i=0; i<NPARTS; i++) free(*(poot+i)); free(poot); } void initPositions(){ int i,j; for(i=0;i<DIMS;i++) for(j=0;j<NPARTS;j++) r[i][j] = 0.5+((double)rand()/(double)RAND_MAX); } void updatePositions(){ int i,j; for(i=0;i<DIMS;i++) for(j=0;j<NPARTS;j++) r[i][j] -= 0.5+((double)rand()/(double)RAND_MAX); } double computePot(){ int i,j; double wtime; wtime = omp_get_wtime(); #pragma omp parallel for schedule(static) private(distx,disty,distz,dist,i,j) num_threads(7) for(i=0;i<NPARTS;i++){ for(j=0;j<i-1;j++){ distx = pow((r[0][j]-r[0][i]),2); disty = pow((r[1][j]-r[1][i]),2); distz = pow((r[2][j]-r[2][i]),2); dist = sqrt(distx+disty+distz); //pot = pot + 1.0/dist; poot[i][j] = 1.0/dist; } } wtime = omp_get_wtime() - wtime; for(i=0;i<NPARTS;i++){ for(j=0;j<i-1;j++){ pot = pot + poot[i][j]; } } return wtime; }
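/*
 * Aside: an alternative computePot() sketch using OpenMP's reduction clause.
 * This is illustrative, not part of cp_up.c: it reuses the globals r and pot
 * and the macros/headers declared above, and keeps the same j < i-1 pair loop,
 * but accumulates the sum directly instead of staging per-pair values in the
 * O(NPARTS^2) poot array and summing them serially afterwards.
 */
double computePotReduction(){
    int i,j;
    double local = 0.0;
    double wtime = omp_get_wtime();
    /* each thread keeps a private partial sum; OpenMP combines them at the end */
    #pragma omp parallel for schedule(static) private(j) reduction(+:local)
    for(i=0;i<NPARTS;i++){
        for(j=0;j<i-1;j++){
            double dx = r[0][j]-r[0][i];
            double dy = r[1][j]-r[1][i];
            double dz = r[2][j]-r[2][i];
            local += 1.0/sqrt(dx*dx+dy*dy+dz*dz);
        }
    }
    pot = local;
    return omp_get_wtime() - wtime;
}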
GB_binop__pow_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_01__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_03__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc64) // C=scalar+B GB (_bind1st__pow_fc64) // C=scalar+B' GB (_bind1st_tran__pow_fc64) // C=A+scalar GB (_bind2nd__pow_fc64) // C=A'+scalar GB (_bind2nd_tran__pow_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_cpow (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_cpow (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__pow_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__pow_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_cpow (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_cpow (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_cpow (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
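//------------------------------------------------------------------------------
// Aside (not part of the generated file above): the bind2nd kernel reduces to a
// plain elementwise loop. Assuming GB_cpow on GxB_FC64_t values behaves like
// C99 cpow on double complex (special-case handling aside), the standalone
// sketch below mirrors the pattern, including the bitmap skip; the name
// apply_bind2nd_pow is hypothetical.
//------------------------------------------------------------------------------

#include <complex.h>
#include <stdint.h>

static void apply_bind2nd_pow
(
    double complex *Cx,           // output array of size anz
    const double complex *Ax,     // input array of size anz
    double complex y,             // the bound second argument
    const int8_t *Ab,             // bitmap; NULL means all entries present
    int64_t anz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // entry absent: leave Cx[p] alone
        Cx [p] = cpow (Ax [p], y) ;             // cij = aij ^ y
    }
}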
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo 
*, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) LLVM_DELETED_FUNCTION; void operator=(const Sema &) LLVM_DELETED_FUNCTION; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); static bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return !Old->isHidden() || New->isExternallyVisible(); } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. 
DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. 
bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief A mapping from external names to the most recent /// locally-scoped extern "C" declaration with that name. /// /// This map contains external declarations introduced in local /// scopes, e.g., /// /// \code /// extern "C" void f() { /// void foo(int, int); /// } /// \endcode /// /// Here, the name "foo" will be associated with the declaration of /// "foo" within f. This name is not visible outside of /// "f". However, we still find it in two cases: /// /// - If we are declaring another global or extern "C" entity with /// the name "foo", we can find "foo" as a previous declaration, /// so that the types of this external declaration can be checked /// for compatibility. /// /// - If we would implicitly declare "foo" (e.g., due to a call to /// "foo" in C when no prototype or definition is visible), then /// we find this declaration of "foo" and complain that it is /// not visible. llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls; /// \brief Look for a locally scoped extern "C" declaration by the given name. 
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::DenseMap<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. 
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. 
LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. 
PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
  class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// \brief A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

  /// \brief The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// \brief The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of each unparsed default
  // argument.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedButUsed - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
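  ///
  /// A sketch of the re-entrancy guard this set supports (illustrative only;
  /// \c RD and \c CSM stand for the record and special member at hand):
  /// \code
  ///   SpecialMemberDecl Key(RD, CSM);
  ///   if (SpecialMembersBeingDeclared.count(Key))
  ///     return;  // recursive re-entry: act as if not yet declared
  ///   SpecialMembersBeingDeclared.insert(Key);
  /// \endcode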
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema& S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }
  private:
    Sema& S;
    bool OldFPContractState : 1;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
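    ///
    /// The result keeps the Sema-aware builder type through chained
    /// streaming, e.g. (operands illustrative):
    /// \code
    ///   SemaRef.Diag(Loc, DiagID) << SomeDecl << SomeType;
    /// \endcode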
template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
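  ///
  /// A minimal sketch of a concrete diagnoser (the diagnostic ID is
  /// illustrative):
  /// \code
  ///   struct MyTypeDiagnoser : Sema::TypeDiagnoser {
  ///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
  ///       S.Diag(Loc, diag::err_incomplete_type) << T;
  ///     }
  ///   };
  /// \endcode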
struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template<typename T1> class BoundTypeDiagnoser1 : public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; public: BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << T; } virtual ~BoundTypeDiagnoser1() { } }; template<typename T1, typename T2> class BoundTypeDiagnoser2 : public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; const T2 &Arg2; public: BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1, const T2 &Arg2) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T; } virtual ~BoundTypeDiagnoser2() { } }; template<typename T1, typename T2, typename T3> class BoundTypeDiagnoser3 : public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; const T2 &Arg2; const T3 &Arg3; public: BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2), Arg3(Arg3) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T; } virtual ~BoundTypeDiagnoser3() { } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); public: bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template<typename T1> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireCompleteType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireCompleteType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, 
TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template<typename T1> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireCompleteExprType(E, Diagnoser); } template<typename T1, typename T2> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireCompleteExprType(E, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template<typename T1> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireLiteralType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireLiteralType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. 
  /// Emits a warning and returns a synthesized DependentNameType that isn't
  /// really dependent on any other template arguments.
  ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                            SourceLocation NameLoc);

  /// \brief Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;
    const IdentifierInfo *Keyword;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword)
      : Kind(NC_Keyword), Keyword(Keyword) { }

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };

  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
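  ///
  /// A sketch of typical parser-side use (names are illustrative; \c Actions
  /// is the parser's Sema instance):
  /// \code
  ///   Sema::NameClassification C =
  ///       Actions.ClassifyName(S, SS, Name, NameLoc, Next,
  ///                            /*IsAddressOfOperand=*/false);
  ///   if (C.getKind() == Sema::NC_Type)
  ///     ParsedType T = C.getType();
  /// \endcode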
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, 
SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. 
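  ///
  /// For example:
  /// \code
  ///   ;                  // empty-declaration
  ///   [[deprecated]];    // attribute-declaration
  /// \endcode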
Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo &Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle 
InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. 
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// \brief Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
  /// true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  /// enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Attribute merging methods. Return the merged attribute if a new one was
  /// added, or null otherwise.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
    Ovl_NonFunction
  };

  OverloadKind CheckOverload(Scope *S,
                             FunctionDecl *New,
                             const LookupResult &OldDecls,
                             NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

  /// \brief Checks availability of the function depending on the current
  /// function context. Inside an unavailable function, unavailability is
  /// ignored.
  ///
  /// \returns true if \p FD is unavailable and current context is inside
  /// an available function, false otherwise.
  bool isFunctionConsideredUnavailable(FunctionDecl *FD);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions,
                        bool AllowExplicit,
                        bool InOverloadResolution,
                        bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution,
                           QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType& ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                  QualType FromType, QualType ToType);

  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType,
                              CastKind &Kind,
                              CXXCastPath& BasePath,
                              bool IgnoreBaseAccess);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType,
                                 bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind,
                                    CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsNoReturnConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType,
                                             Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
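    // For example: 'case N:' (CCEK_CaseValue), 'enum E : char { A = N }'
    // (CCEK_Enumerator), 'X<N>' (CCEK_TemplateArg), and the array bound in
    // 'new int[N]' (CCEK_NewExpr) all require N to be a converted constant
    // expression.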
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
  // TODO: make this a typesafe union.
  typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

  void AddOverloadCandidate(FunctionDecl *Function,
                            DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet& CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = false);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                      ArrayRef<Expr *> Args,
                      OverloadCandidateSet &CandidateSet,
                      bool SuppressUserConversions = false,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
  void AddMethodCandidate(DeclAccessPair FoundDecl,
                          QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method,
                          DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversions = false);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false);
  void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
                                    DeclAccessPair FoundDecl,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet& CandidateSet,
                                    bool SuppressUserConversions = false);
  void AddConversionCandidate(CXXConversionDecl *Conversion,
                              DeclAccessPair FoundDecl,
                              CXXRecordDecl *ActingContext,
                              Expr *From, QualType ToType,
                              OverloadCandidateSet& CandidateSet,
                              bool AllowObjCConversionOnExplicit);
  void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                      DeclAccessPair FoundDecl,
                                      CXXRecordDecl *ActingContext,
                                      Expr *From, QualType ToType,
                                      OverloadCandidateSet &CandidateSet,
                                      bool AllowObjCConversionOnExplicit);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto,
                             Expr *Object, ArrayRef<Expr *> Args,
                             OverloadCandidateSet& CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet& CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                    OverloadCandidateSet& CandidateSet);
  void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                            SourceLocation Loc,
                                            ArrayRef<Expr *> Args,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                            OverloadCandidateSet& CandidateSet,
                                            bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate.
  void NoteOverloadCandidate(FunctionDecl *Fn,
                             QualType DestType = QualType());

  // Emit as a series of 'note's all template and non-template overload
  // candidates identified by the expression Expr.
  void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
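  ///
  /// A sketch of the intended call pattern (\c S is a Sema instance; the
  /// surrounding names are illustrative):
  /// \code
  ///   if (EnableIfAttr *Failing = S.CheckEnableIf(FD, Args))
  ///     Candidate.Viable = false;  // 'Failing' names the failed attribute
  /// \endcode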
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  // [PossiblyAFunctionType]  -->  [Return]
  // NonFunctionType         -->  NonFunctionType
  // R (A)                   -->  R(A)
  // R (*)(A)                -->  R (A)
  // R (&)(A)                -->  R (A)
  // R (S::*)(A)             -->  R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                     QualType TargetType,
                                     bool Complain,
                                     DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr,
      bool DoFunctionPointerConversion = false,
      bool Complain = false,
      const SourceRange& OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E,
                                       DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  // An enum to represent whether something is dealing with a call to begin()
  // or a call to end() in a range-based for loop.
  enum BeginEndFunction {
    BEF_begin,
    BEF_end
  };

  ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           VarDecl *Decl,
                                           BeginEndFunction BEF,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection=true);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     unsigned Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                   unsigned Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *LHS, Expr *RHS);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);

  ExprResult
  BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                            SourceLocation LParenLoc,
                            MultiExprArg Args,
                            SourceLocation RParenLoc);
  ExprResult
  BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                               SourceLocation LParenLoc,
                               MultiExprArg Args,
                               SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
    ParmVarDecl *const *ParamEnd, bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Lookup of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Lookup of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up the implicit 'self' parameter of an Objective-C method.
  LookupObjCImplicitSelfParam,
  /// \brief Look up any declaration with any name.
  LookupAnyName
};

/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// \brief The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// \brief The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists.
  ForRedeclaration
};

/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// \brief The lookup resulted in an error.
  LOLR_Error,
  /// \brief The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// \brief The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
    CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis,
    bool ConstThis, bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) LLVM_NOEXCEPT;
  TypoExprState &operator=(TypoExprState &&other) LLVM_NOEXCEPT;
};

/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
    TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC);

// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(
    const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind,
    Scope *S, CXXScopeSpec *SS,
    std::unique_ptr<CorrectionCandidateCallback> CCC,
    DeclContext *MemberContext, bool EnteringContext,
    const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
    SourceLocation Loc, LookupNameKind NameKind,
    RedeclarationKind Redecl = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
    bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
    bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
    CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
    bool AllowBuiltinCreation = false, bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
    RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
    QualType T1, QualType T2, UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
    DeclAccessPair Operator, QualType T1, QualType T2);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
    SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
    unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
    bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
    unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
    bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
    ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate,
    bool AllowStringTemplate);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
    ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
    VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
    VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);
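// A minimal sketch (hypothetical caller; II, Loc, and S are placeholders) of
// unqualified lookup through LookupName:
//
//   LookupResult R(*this, II, Loc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();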
enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non-error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
    Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS,
    std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode,
    DeclContext *MemberContext = nullptr, bool EnteringContext = false,
    const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
    Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS,
    std::unique_ptr<CorrectionCandidateCallback> CCC,
    TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC,
    CorrectTypoKind Mode, DeclContext *MemberContext = nullptr,
    bool EnteringContext = false,
    const ObjCObjectPointerType *OPT = nullptr);

ExprResult CorrectDelayedTyposInExpr(Expr *E,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; });

ExprResult CorrectDelayedTyposInExpr(ExprResult ER,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
    const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
    const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote,
    bool ErrorRecovery = true);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
    ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces,
    AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
    bool ConsiderLinkage, bool AllowInlineNamespace);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
    SourceLocation IdLoc, bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
    bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
    Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
    bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
    const AttributeList *AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
    const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
    unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr);
bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD, SourceRange Range,
    bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling);

void CheckAlignasUnderalignment(Decl *D);
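// A minimal sketch (hypothetical caller; Typo, S, and SS are placeholders,
// and the diagnostic ID is illustrative only): attempt a correction for a
// failed lookup and report it.
//
//   if (TypoCorrection Corrected = CorrectTypo(
//           Typo, LookupOrdinaryName, S, SS,
//           llvm::make_unique<CorrectionCandidateCallback>(),
//           CTK_ErrorRecovery))
//     diagnoseTypo(Corrected,
//                  PDiag(diag::err_undeclared_var_use_suggest)
//                      << Typo.getName());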
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
    SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
    ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
    ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
    ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl *> ProtocolsMethodsMap;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
    ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any
/// method remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
    ObjCContainerDecl *IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
    ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
    ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
    const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter or
/// getter and its property has a backing ivar, returns this ivar; otherwise,
/// returns NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
    const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
    SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD,
    Selector GetterSel, Selector SetterSel, const bool isAssign,
    const bool isReadWrite, const unsigned Attributes,
    const unsigned AttributesAsWritten, bool *isOverridingProperty,
    TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl,
    SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD,
    Selector GetterSel, Selector SetterSel, const bool isAssign,
    const bool isReadWrite, const unsigned Attributes,
    const unsigned AttributesAsWritten, TypeSourceInfo *T,
    tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
    ObjCContainerDecl *IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
    const ObjCMethodDecl *PrevMethod,
    MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
    const SelectorSet &ClsMap, SelectorSet &InsMapSeen,
    SelectorSet &ClsMapSeen, ObjCImplDecl *IMPDecl, ObjCContainerDecl *IDecl,
    bool &IncompleteImpl, bool ImmediateClass,
    bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass, bool warn, bool instance);

public:
/// \brief Returns instance or factory methods in the global method pool for
/// a given selector. If no such method (or only one method) is found,
/// returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
    SmallVectorImpl<ObjCMethodDecl *> &Methods, bool instance);

bool AreMultipleMethodsInGlobalPool(Selector Sel, bool instance);

private:
/// \brief Returns a selector which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
    bool IsInstance);

/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
    bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method,
    bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method,
    bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass = false, bool warn = true) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn,
                                  /*instance*/ true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass = false, bool warn = true) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn,
                                  /*instance*/ false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
    QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
    SmallVectorImpl<ObjCIvarDecl *> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.

public:
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) {}

  ExprResult release() { return E; }

  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
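// A minimal sketch (hypothetical caller; CondExpr, IfLoc, ThenStmt, ElseLoc,
// and ElseStmt are placeholders): wrapping a condition as a full-expression
// before handing it to a statement callback.
//
//   FullExprArg Cond = MakeFullExpr(CondExpr, IfLoc);
//   StmtResult If = ActOnIfStmt(IfLoc, Cond, /*CondVar=*/nullptr,
//                               ThenStmt, ElseLoc, ElseStmt);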
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
    bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
    ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S) : S(S) { S.ActOnStartOfCompoundStmt(); }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
    SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);

StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
    SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
    SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
    SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
    ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal,
    Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
    Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
    Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
    Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
    SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond,
    SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
    Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third,
    SourceLocation RParenLoc, Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
    Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First,
    Expr *collection, SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
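// A minimal sketch (hypothetical caller; S, LBrace, RBrace, and Elts are
// placeholders): balancing the compound-statement callbacks with the RAII
// helper while building a body.
//
//   Sema::CompoundScopeRAII CompoundScope(S);
//   StmtResult Body = S.ActOnCompoundStmt(LBrace, RBrace, Elts,
//                                         /*isStmtExpr=*/false);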
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
    SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc,
    BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
    SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond,
    Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc,
    BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
    LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
    SourceLocation StarLoc, Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
    CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
    CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
    SourceLocation Loc, unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
    bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
    bool AllowFunctionParameters);

StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
    Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
    Expr *RetValExp);

StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
    bool IsVolatile, unsigned NumOutputs, unsigned NumInputs,
    IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs,
    Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc);

ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, UnqualifiedId &Id,
    llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset,
    SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
    ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs,
    unsigned NumInputs, ArrayRef<StringRef> Constraints,
    ArrayRef<StringRef> Clobbers, ArrayRef<Expr *> Exprs,
    SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
    SourceLocation Location, bool AlwaysCreate);

VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo,
    QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc,
    IdentifierInfo *Id, bool Invalid = false);

Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
    Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
    MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
    Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
    Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr,
    Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
    SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
    Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
    ArrayRef<Stmt *> Handlers);

StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
    SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr,
    Stmt *Block);
StmtResult ActOnSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);

/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
///     if (condition);
///       do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
    unsigned DiagID);

/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable };

void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D,
    StringRef Message, SourceLocation Loc,
    const ObjCInterfaceDecl *UnknownObjCClass,
    const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess);

bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message);
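// A minimal sketch (hypothetical caller; D is a placeholder for the finished
// declaration, and the pool constructor arguments are assumptions): delaying
// diagnostics while a declaration is parsed, then releasing them against it.
//
//   sema::DelayedDiagnosticPool Pool(/*parent=*/nullptr);
//   ParsingDeclState State = PushParsingDeclaration(Pool);
//   // ... parse and build the declaration D ...
//   PopParsingDeclaration(State, D);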
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
    const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
    bool ObjCPropertyAccess = false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
    ObjCMethodDecl *Getter, SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
    ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
    Decl *LambdaContextDecl = nullptr, bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
    ReuseLambdaContextDecl_t, bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
    bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit,
  TryCapture_ExplicitByVal,
  TryCapture_ExplicitByRef
};
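// A minimal sketch (hypothetical caller): bracketing work on an expression
// with an unevaluated context so it triggers no odr-use side effects.
//
//   PushExpressionEvaluationContext(Unevaluated);
//   // ... build or check the expression ...
//   PopExpressionEvaluationContext();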
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
    TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose,
    QualType &CaptureType, QualType &DeclRefType,
    const unsigned *const FunctionScopeIndexToStopAt);

/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
    TryCaptureKind Kind = TryCapture_Implicit,
    SourceLocation EllipsisLoc = SourceLocation());

/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
    bool SkipLocalVariables = false);

/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
    bool ForceComplain = false,
    bool (*IsPlausibleResult)(QualType) = nullptr);

/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
    UnresolvedSetImpl &NonTemplateOverloads);

/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
    const PartialDiagnostic &PD);
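// A minimal sketch (hypothetical caller; Var and Loc are placeholders):
// asking whether a variable could be implicitly captured without committing
// to the capture.
//
//   QualType CaptureType, DeclRefType;
//   bool Invalid = tryCaptureVariable(Var, Loc, TryCapture_Implicit,
//                                     SourceLocation(),
//                                     /*BuildAndDiagnose=*/false,
//                                     CaptureType, DeclRefType, nullptr);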
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen,
    bool IsAddressOfOperand,
    std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
    bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
    TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *&TemplateArgs);

bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
    std::unique_ptr<CorrectionCandidateCallback> CCC,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
    ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
    IdentifierInfo *II, bool AllowBuiltinCreation = false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
    bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
    SourceLocation Loc, const CXXScopeSpec *SS = nullptr);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
    const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr,
    NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr);

ExprResult BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
    SourceLocation nameLoc, IndirectFieldDecl *indirectField,
    DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
    Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation());

ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, LookupResult &R,
    const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, LookupResult &R,
    const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
    const LookupResult &R, bool HasTrailingLParen);

ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
    const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand,
    TypeSourceInfo **RecoveryTSI = nullptr);

ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
    bool NeedsADL, bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
    const DeclarationNameInfo &NameInfo, NamedDecl *D,
    NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr,
    bool AcceptInvalidDecl = false);

ExprResult BuildLiteralOperatorCall(LookupResult &R,
    DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args,
    SourceLocation LitEndLoc,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

ExprResult BuildPredefinedExpr(SourceLocation Loc,
    PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
    Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R,
    MultiExprArg Val);

/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
    Scope *UDLScope = nullptr);

ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
    SourceLocation DefaultLoc, SourceLocation RParenLoc,
    Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes,
    ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
    SourceLocation DefaultLoc, SourceLocation RParenLoc,
    Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types,
    ArrayRef<Expr *> Exprs);

// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
    Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
    UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
    Expr *Input);

QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
    SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
    UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
    UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx,
    const SourceRange &ArgRange);

ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);

bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
    SourceLocation OpLoc, SourceRange ExprRange,
    UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc,
    IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc);

ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
    tok::TokenKind Kind, Expr *Input);

ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
    Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
    Expr *Idx, SourceLocation RLoc);
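// A minimal sketch (hypothetical caller; S, Loc, E, A, and B are
// placeholders): building '-E' and 'A + B' through the Build* entry points.
//
//   ExprResult Neg = BuildUnaryOp(S, Loc, UO_Minus, E);
//   ExprResult Sum = BuildBinOp(S, Loc, BO_Add, A, B);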
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
  bool HasTrailingLParen;
};

ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
    SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope,
    const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
    SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope,
    LookupResult &R, const TemplateArgumentListInfo *TemplateArgs,
    bool SuppressQualifierCheck = false,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
    const CXXScopeSpec &SS, const LookupResult &R);

ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
    bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope,
    const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
    tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    UnqualifiedId &Member, Decl *ObjCImpDecl, bool HasTrailingLParen);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
    const FunctionProtoType *Proto, ArrayRef<Expr *> Args,
    SourceLocation RParenLoc, bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
    const Expr *ArgExpr);

/// ActOnCallExpr - Handle a call to Fn with the specified array of
/// arguments. This provides the location of the left/right parens and a
/// list of comma locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
    MultiExprArg ArgExprs, SourceLocation RParenLoc,
    Expr *ExecConfig = nullptr, bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
    SourceLocation LParenLoc, ArrayRef<Expr *> Arg,
    SourceLocation RParenLoc, Expr *Config = nullptr,
    bool IsExecConfig = false);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
    MultiExprArg ExecConfig, SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
    ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
    SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
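// A minimal sketch (hypothetical caller; S, Fn, A, B, LParen, and RParen are
// placeholders): building 'f(a, b)' once the callee and argument expressions
// exist.
//
//   Expr *CallArgs[] = {A, B};
//   ExprResult Call = ActOnCallExpr(S, Fn, LParen,
//                                   MultiExprArg(CallArgs), RParen);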
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
    SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
    SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
    TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
    SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc,
    bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
    Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
    Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
    Expr *LHSExpr, Expr *RHSExpr);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
    SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
    LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
    SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
    TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr,
    unsigned NumComponents, SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
    SourceLocation TypeLoc, ParsedType ParsedArgTy,
    OffsetOfComponent *CompPtr, unsigned NumComponents,
    SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
    Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
    SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
    TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// \brief The symbol exists.
  IER_Exists,
  /// \brief The symbol does not exist.
  IER_DoesNotExist,
  /// \brief The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,
  /// \brief An error occurred.
  IER_Error
};
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
    const DeclarationNameInfo &TargetNameInfo);
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S,
    SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS,
    UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
    bool IsIfExists, NestedNameSpecifierLoc QualifierLoc,
    DeclarationNameInfo NameInfo, Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
    bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
    Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
    Scope *CurScope);

//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
    SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
    SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
    SourceLocation NamespaceLoc, SourceLocation IdentLoc,
    IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

CXXRecordDecl *getStdBadAlloc() const;

/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
    SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc,
    IdentifierInfo *NamespcName, AttributeList *AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
    SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS,
    SourceLocation IdentLoc, IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
    const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
    NamedDecl *Target, UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
    bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc,
    const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS,
    const DeclarationNameInfo &NameInfo, SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
    SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo,
    AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword,
    SourceLocation TypenameLoc);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
    bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS,
    UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword,
    SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
    MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc,
    UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
    QualType DeclInitType, CXXConstructorDecl *Constructor,
    MultiExprArg Exprs, bool HadMultipleCandidates,
    bool IsListInitialization, bool IsStdInitListInitialization,
    bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check
// if the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
    QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable,
    MultiExprArg Exprs, bool HadMultipleCandidates,
    bool IsListInitialization, bool IsStdInitListInitialization,
    bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
    ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains
      ///   "any"
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
    CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
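// A minimal sketch (hypothetical caller; Loc and Bases are placeholders):
// accumulating the exception specification implied by the destructors a
// defaulted member would call.
//
//   ImplicitExceptionSpecification ExceptSpec(*this);
//   for (CXXRecordDecl *Base : Bases)
//     if (CXXDestructorDecl *Dtor = LookupDestructor(Base))
//       ExceptSpec.CalledDecl(Loc, Dtor);
//   FunctionProtoType::ExceptionSpecInfo ESI =
//       ExceptSpec.getExceptionSpec();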
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. 
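/// For example (illustrative), the definition is triggered at the first
/// point of use:
/// \code
///   struct S { S(); S(const S &); };
///   struct T { S s; };   // T's copy constructor is implicitly declared
///   T a;
///   T b = a;             // ...and implicitly defined at this use
/// \endcode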
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed-in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr wrapping /// it. Otherwise, it simply returns the passed-in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior.
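// For example (illustrative), this flags dereferences like:
//   float f = 0;
//   int *p = reinterpret_cast<int *>(&f);
//   int i = *p;   // type punning through an incompatible pointer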
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse the 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if an error occurred, false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E, bool IsThrownVarInScope); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
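/// For example (illustrative; 'buf', 'n', and 'Widget' are placeholder
/// names), all of the following forms are handled here:
/// \code
///   new int(5);             // direct-initialized scalar
///   new (buf) Widget(1, 2); // placement arguments
///   new double[n];          // array form
/// \endcode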
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addMallocAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.
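/// For example (illustrative; Clang's Embarcadero-compatible expression
/// traits):
/// \code
///   bool B1 = __is_lvalue_expr(*p);
///   bool B2 = __is_rvalue_expr(42);
/// \endcode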
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType, bool HasTrailingLParen); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName, bool HasTrailingLParen); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS, bool HasTrailingLParen); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
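/// For example (Microsoft extension; illustrative):
/// \code
///   struct B { void f(); };
///   struct D : B { void f() { __super::f(); } };
/// \endcode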
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing a /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery; in this case, no diagnostics are emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
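/// For example (illustrative), this handles the 'apply<T>::' portion of:
/// \code
///   typename MetaFn::template apply<T>::type x;
/// \endcode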
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
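/// For example (illustrative; 'f' and 'obj' are placeholder names):
/// \code
///   auto l1 = [x = f()] { return x; };   // initialized by value
///   auto l2 = [&r = obj] { return r; };  // bound by reference
/// \endcode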
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope, bool IsInstantiation = false); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
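/// For example (illustrative):
/// \code
///   NSNumber *i = @42;
///   NSNumber *d = @3.14;
/// \endcode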
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *" or "NSString *" depending on the type of /// ValueType, which is allowed to be a built-in numeric type or /// "char *" or "const char *". ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, 
SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); typedef LazyVector<CXXRecordDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDynamicClasses, 2, 2> DynamicClassesType; /// \brief A list of all of the dynamic classes in this translation /// unit. DynamicClassesType DynamicClasses; /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
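/// For example (illustrative), constructing an object of a dynamic class
/// marks its vtable as used and may require this translation unit to emit it:
/// \code
///   struct B { virtual void f(); };
///   void g() { B b; }   // B's vtable is marked used here
/// \endcode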
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
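// For example (illustrative), a derived-to-base conversion whose CXXCastPath
// records each intermediate class:
//   struct A {}; struct B : A {}; struct C : B {};
//   C c;
//   A *a = &c;   // base path: C -> B -> A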
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of the base's spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool
isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template<typename T1> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, 
IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. 
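/// For example (illustrative):
/// \code
///   template<typename T, unsigned N> void f(T (&arr)[N]);
///   int a[4];
///   f(a);   // T is CTAK_Deduced; N is CTAK_DeducedFromArrayBound
/// \endcode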
enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter.
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. 
  UPPC_DeclarationType,

  /// \brief The type of a data member.
  UPPC_DataMemberType,

  /// \brief The size of a bit-field.
  UPPC_BitFieldWidth,

  /// \brief The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// \brief The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// \brief The enumerator value.
  UPPC_EnumeratorValue,

  /// \brief A using declaration.
  UPPC_UsingDeclaration,

  /// \brief A friend declaration.
  UPPC_FriendDeclaration,

  /// \brief A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// \brief An initializer.
  UPPC_Initializer,

  /// \brief A default argument.
  UPPC_DefaultArgument,

  /// \brief The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// \brief The type of an exception.
  UPPC_ExceptionType,

  /// \brief Partial specialization.
  UPPC_PartialSpecialization,

  /// \brief Microsoft __if_exists.
  UPPC_IfExists,

  /// \brief Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// \brief Lambda expression.
  UPPC_Lambda,

  /// \brief Block expression.
  UPPC_Block
};

/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);

/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                       UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);

/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);

/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
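///
/// For instance (an illustrative sketch, not taken from the surrounding
/// code), a template template parameter pack used as a template name
/// without being expanded:
/// \code
/// template<template<typename> class ...Templates>
/// struct X {
///   Templates<int> Member; // 'Templates' is named here but never expanded
/// };
/// \endcode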
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
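///
/// For example (an illustrative sketch; \c f stands for any callee):
/// \code
/// template<typename ...Ts> void g(Ts ...Vals) {
///   f(Vals...); // 'Vals' is the pattern; the '...' creates the expansion
/// }
/// \endcode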
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. 
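///
/// For example (an illustrative sketch):
/// \code
/// X<Ts...> // pattern: Ts; the ellipsis immediately follows the pattern
/// \endcode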
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
    TemplateArgumentLoc OrigLoc,
    SourceLocation &Ellipsis,
    Optional<unsigned> &NumExpansions) const;

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);

/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// \brief Template argument deduction was successful.
  TDK_Success = 0,
  /// \brief The declaration was invalid; do nothing.
  TDK_Invalid,
  /// \brief Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// \brief Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// \brief Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// \brief Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// \brief Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// \brief The arguments included an overloaded function name that could
  /// not be resolved to a suitable function.
  TDK_FailedOverloadResolution,
  /// \brief Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType,
                  unsigned ArgIdx,
                  QualType OriginalArgType)
    : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
      OriginalArgType(OriginalArgType) { }

  QualType OriginalParamType;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
                                SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                                unsigned NumExplicitlySpecified,
                                FunctionDecl *&Specialization,
                                sema::TemplateDeductionInfo &Info,
                                SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        ArrayRef<Expr *> Args,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// \brief Result type of DeduceAutoType.
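///
/// A minimal illustration (not from the original documentation) of two of
/// the outcomes:
/// \code
/// auto X = 42;        // DAR_Succeeded: 'X' deduced as int
/// auto Y = {1, 2.5};  // DAR_Failed: no consistent type can be deduced
/// \endcode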
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
  DeducedTemplateArgumentSubstitution,

  /// We are substituting prior template arguments into a new
  /// template parameter. The template parameter itself is either a
  /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
  PriorTemplateArgumentSubstitution,

  /// We are checking the validity of a default template argument that
  /// has been used when naming a template-id.
  DefaultTemplateArgumentChecking,

  /// We are instantiating the exception specification for a function
  /// template which was deferred until it was needed.
  ExceptionSpecInstantiation
} Kind;

/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;

/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;

/// \brief The entity that is being instantiated.
Decl *Entity;

/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;

/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;

/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;

/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;

ActiveTemplateInstantiation()
  : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
    TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;

friend bool operator==(const ActiveTemplateInstantiation &X,
                       const ActiveTemplateInstantiation &Y) {
  if (X.Kind != Y.Kind)
    return false;

  if (X.Entity != Y.Entity)
    return false;

  switch (X.Kind) {
  case TemplateInstantiation:
  case ExceptionSpecInstantiation:
    return true;

  case PriorTemplateArgumentSubstitution:
  case DefaultTemplateArgumentChecking:
    return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;

  case DefaultTemplateArgumentInstantiation:
  case ExplicitTemplateArgumentSubstitution:
  case DeducedTemplateArgumentSubstitution:
  case DefaultFunctionArgumentInstantiation:
    return X.TemplateArgs == Y.TemplateArgs;
  }

  llvm_unreachable("Invalid InstantiationKind!");
}

friend bool operator!=(const ActiveTemplateInstantiation &X,
                       const ActiveTemplateInstantiation &Y) {
  return !(X == Y);
}
};

/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
  ActiveTemplateInstantiations;

/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;

/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// \brief Get the set of additional modules that should be checked during
/// name lookup.
/// A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;

/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
    : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
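  ///
  /// A typical usage pattern inside Sema looks roughly like this
  /// (an illustrative sketch, not a verbatim excerpt):
  /// \code
  /// InstantiatingTemplate Inst(*this, PointOfInstantiation, Record);
  /// if (Inst.isInvalid())
  ///   return true; // instantiation depth limit was exceeded
  /// \endcode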
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); // FIXME: Replace this with a constructor once we can use delegating // constructors in llvm. 
void Initialize( ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION; InstantiatingTemplate& operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S): S(S) {
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
};

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
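///
/// For example (illustrative only):
/// \code
/// template<typename T> void Outer() {
///   struct Local {
///     void Method() {} // queued here; instantiated with Outer<T>
///   };
/// }
/// \endcode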
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs);

void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                                const MultiLevelTemplateArgumentList &TemplateArgs,
                                LateInstantiatedAttrVec *LateAttrs,
                                DeclContext *Owner,
                                LocalInstantiationScope *StartingScope,
                                bool InstantiatingVarTemplate = false);

void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
    SourceLocation PointOfInstantiation,
    VarDecl *Var,
    bool Recursive = false,
    bool DefinitionRequired = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                                const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                                     const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
                               IdentifierInfo *ClassName,
                               SourceLocation ClassLoc,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc,
                               Decl * const *ProtoRefs,
                               unsigned NumProtoRefs,
                               const SourceLocation *ProtoLocs,
                               SourceLocation EndProtoLoc,
                               AttributeList *AttrList);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(
    SourceLocation AtCompatibilityAliasLoc,
    IdentifierInfo *AliasName, SourceLocation AliasLocation,
    IdentifierInfo *ClassName, SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc,
    IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
    Decl * const *ProtoRefNames, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    AttributeList *AttrList);

Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                  IdentifierInfo *ClassName,
                                  SourceLocation ClassLoc,
                                  IdentifierInfo *CategoryName,
                                  SourceLocation CategoryLoc,
                                  Decl * const *ProtoRefs,
                                  unsigned NumProtoRefs,
                                  const SourceLocation *ProtoLocs,
                                  SourceLocation EndProtoLoc);

Decl *ActOnStartClassImplementation(
    SourceLocation AtClassImplLoc,
    IdentifierInfo *ClassName, SourceLocation ClassLoc,
    IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                                            IdentifierInfo **IdentList,
                                            SourceLocation *IdentLocs,
                                            unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                               const IdentifierLocPair *IdentList,
                                               unsigned NumElts,
                                               AttributeList *attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations,
                             const IdentifierLocPair *ProtocolId,
                             unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty. void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. 
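  ///
  /// e.g., the receiver in \code [super init] \endcode (illustrative).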
ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                     Expr *Alignment, SourceLocation PragmaLoc,
                     SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
                          int SectionFlags, StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                       Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
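///
/// For example (illustrative; 'InternalHelper' is hypothetical):
/// \code
/// #pragma GCC visibility push(hidden)
/// void InternalHelper(); // given hidden visibility
/// #pragma GCC visibility pop
/// \endcode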
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
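///
/// Both spellings below are expected to reach this entry point
/// (illustrative example; 'Buffer' and 'Buffer2' are hypothetical):
/// \code
/// int Buffer[64] __attribute__((aligned(16)));
/// alignas(16) int Buffer2[64];
/// \endcode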
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); // OpenMP directives and clauses. private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind); /// \brief Checks if the specified variable is used in one of the private /// clauses in OpenMP constructs. bool IsOpenMPCapturedVar(VarDecl *VD); public: ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); // OpenMP directives and clauses. /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. 
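/// Illustrative sketch of the directive being handled ('work_a' and 'work_b'
/// are made-up functions):
/// \code
///   #pragma omp sections
///   {
///     #pragma omp section
///     work_a();
///     #pragma omp section
///     work_b();
///   }
/// \endcode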
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. 
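/// Illustrative sketch ('compute' is a made-up function); in OpenMP 4.0 a
/// 'teams' construct must be closely nested inside 'target':
/// \code
///   #pragma omp target
///   #pragma omp teams num_teams(4)
///   compute();
/// \endcode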
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. 
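/// Illustrative sketch of the clause in context ('x' is a made-up variable):
/// \code
///   #pragma omp atomic seq_cst
///   x += 1;   // atomic update with sequentially consistent semantics
/// \endcode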
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause * ActOnOpenMPVarListClause(OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. 
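/// For example (illustrative sketch), in
/// \code
///   volatile int v;
///   void f() { v; }   // value discarded, but the volatile load must remain
/// \endcode
/// the lvalue-to-rvalue conversion is still applied to the discarded operand.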
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam,
                            ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc.
/// The query is phrased in terms of a source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
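/// Illustrative case (sketch only; this form is valid C, with a warning):
/// \code
///   enum Color { Red, Green, Blue };
///   enum Color c;
///   c = 7;   // warned: the constant 7 is not in the range of 'Color' values
/// \endcode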
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                   ExprResult &RHS,
                                                   bool Diagnose = true,
                                                   bool DiagnoseCFAudited = false);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                       = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).

/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
  ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
  SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign, bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned OpaqueOpc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc);

// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
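/// For instance (illustrative sketch):
/// \code
///   int i = 0;
///   int &r1 = i;         // Ref_Compatible
///   const int &r2 = i;   // Ref_Compatible_With_Added_Qualification
/// \endcode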
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK,
                                           bool DiagnoseCFAudited = false,
                                           BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args,
                               Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method,
                               bool isClassMessage, bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
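/// For example (illustrative sketch), 'init' has a related result type, so
/// the send below is typed as NSMutableString* rather than plain id:
/// \code
///   NSMutableString *s = [[NSMutableString alloc] init];
/// \endcode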
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation.
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope 
*S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args, unsigned NumParams, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
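// A typical source-level use of this builtin (illustrative sketch only):
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si interleave_lo(v4si a, v4si b) {
//     return __builtin_shufflevector(a, b, 0, 4, 1, 5); // lanes from a and b
//   }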
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); public: /// \brief Register a magic integral constant to be used as a type tag. 
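/// Illustrative sketch of the attributes this supports (the 'my_api' kind and
/// all declarations below are made-up examples):
/// \code
///   static const int my_int_tag
///       __attribute__((type_tag_for_datatype(my_api, int))) = 1;
///   void my_send(void *buf, unsigned len, int tag)
///       __attribute__((pointer_with_type_tag(my_api, 1, 3)));
/// \endcode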
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
    : Type(Type), LayoutCompatible(LayoutCompatible),
      MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTWriter;

public:
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSLocalManglingNumber() const {
  return CurScope->incrementMSLocalManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

AvailabilityResult getCurContextAvailability() const;

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context,
                                              Sema::TemplateDeductionResult TDK,
                                              sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
conv_kernel_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: quanwang@openailab.com
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"

#if __AVX__
#include <immintrin.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

#ifndef _MSC_VER
#include <sys/time.h>

static double get_current_time()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000.0 + tv.tv_usec / 1000.0;
}
#endif

static int get_private_mem_size(struct ir_tensor* filter)
{
    if (filter->data_type == TENGINE_DT_UINT8) // simulate uint8 inference with fp32
        return filter->elem_num * filter->elem_size * 4;
    else
        return filter->elem_num * filter->elem_size; // caution
}

static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data,
           filter->elem_num * filter->elem_size);
}

static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* dequant uint8 weight to fp32 for the simulator */
    float* weight_fp32 = (float*)priv_info->interleave_buffer;
    uint8_t* weight_uint8 = (uint8_t*)filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
    }
}

void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw,
                 int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor,
                  struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh =
param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; float scale = input_tensor->scale; int zero_point = input_tensor->zero_point; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; float in_fp32 = ((float)in[0] - (float)zero_point) * scale; out[0] = in_fp32; out++; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_int8(int8_t* data_img, int8_t* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; int8_t * out = data_col + (c * outh + h) * outw; const int8_t * end = out + w_high; if (im_row >= 0 && im_row < inh) { int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(int8_t)); out += w_low; while (out < end) { in += sw; out[0] = in[0]; out++; } memset(out, 0, (outw - w_high) * sizeof(int8_t)); } else { memset(out, 0, outw * sizeof(int8_t)); } } } } static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size); void* im2col_buf = (void*)priv_info->im2col_buffer; if (input->data_type == TENGINE_DT_FP32) { im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w); } else if (input->data_type == TENGINE_DT_UINT8) { im2col_uint8(input_base, im2col_buf, input, output, param); } else if (input->data_type == TENGINE_DT_INT8) { im2col_int8(input_base, 
im2col_buf, input, output, param); } else { printf("Input data type %d is not supported.\n", input->data_type); } } void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img = pB + i; float* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { #if __AVX__ _mm256_storeu_ps(tmp, _mm256_loadu_ps(img)); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; #endif // __AVX__ tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float* img = pB + i; float* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; float* output4 = pC + (i + 4) * N; float* output5 = pC + (i + 5) * N; float* output6 = pC + (i + 6) * N; float* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); __m256 _sum4 = _mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // 
sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; 
float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // 
k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); 
__m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { float* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif // __AVX__ output += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < K; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #else float sum0 = 0.f; #endif // __AVX__ for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] 
#pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; //#pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; 
n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = 
output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void sgemm_int8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t * bias_int32 = NULL; if (bias) bias_int32 = ( int* )bias->data + outchan_g * group; float input_scale = input->scale; float* kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t* filter_sgemm = interleave_int8; int8_t* input_sgemm_pack4 = im2col_pack4_int8; int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t)); float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) 
output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; if (output_sgemm_fp32[output_off] > 6) output_sgemm_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_sgemm_int32); sys_free(output_sgemm_fp32); } /* check whether the conv should use the winograd implementation */ static int winograd_support(struct conv_param* param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16) return 0; return 1; } int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param) { int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int output_xy = output->dims[2] * output->dims[3]; int elem_size = input->elem_size; // simulate uint8 inference with fp32 if (input->data_type == TENGINE_DT_UINT8) elem_size = 4; return elem_size * output_xy * kernel_size; } int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param) { int K = filter->elem_num / filter->dims[0]; int N = output->dims[2] * output->dims[3]; int elem_size = filter->elem_size; // simulate uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; return (8 * K * (N / 8 + N % 8)) * elem_size; } int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter) { int elem_size = filter->elem_size; // simulate uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size; return size; } void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info) { float* pA = ( float* )priv_info->interleave_buffer; float* pA_t = ( float* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * 
K; const float* k4 = pA + (p + 4) * K; const float* k5 = pA + (p + 5) * K; const float* k6 = pA + (p + 6) * K; const float* k7 = pA + (p + 7) * K; float* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const float* k0 = pA + (p + 0) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info) { int8_t* pA = ( int8_t * )priv_info->interleave_buffer; int8_t* pA_t = ( int8_t* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; const int8_t* k4 = pA + (p + 4) * K; const int8_t* k5 = pA + (p + 5) * K; const int8_t* k6 = pA + (p + 6) * K; const int8_t* k7 = pA + (p + 7) * K; int8_t* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const int8_t* k0 = pA + (p + 0) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check winograd implement, only for conv3x3s1 */ if (input_tensor->data_type == TENGINE_DT_FP32) { priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if 
(!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (input_tensor->data_type == TENGINE_DT_UINT8) interleave_uint8(filter_tensor, priv_info); else interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8) conv_hcl_interleave_pack4_fp32(M, K, priv_info); else conv_hcl_interleave_pack4_int8(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } else { priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer; priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size; } return 0; } int conv_hcl_postrun(struct conv_priv_info* priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) // batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; void* im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, 
j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { printf("Input data type %d is not supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
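/*
 * Editor's note: the self-test below is an illustrative addition for this
 * edit, not part of the original Tengine sources; the guard macro
 * CONV_KERNEL_X86_SELFTEST and the function name are invented. It sketches
 * why the im2col + sgemm pipeline above computes a convolution: for a tiny
 * 1-channel 4x4 input with a 3x3 kernel (stride 1, no padding, no dilation),
 * a plain dot product over the im2col columns must match direct convolution.
 */
#ifdef CONV_KERNEL_X86_SELFTEST
static int im2col_selftest(void)
{
    const int inh = 4, inw = 4, inc = 1;
    const int ksize = 3;
    const int outh = 2, outw = 2; /* (4 - 3) / 1 + 1 = 2 */
    float img[16], kernel[9], col[9 * 4];

    for (int i = 0; i < 16; i++)
        img[i] = (float)i;
    for (int c = 0; c < 9; c++)
        kernel[c] = (float)(c + 1);

    /* columns are laid out as [ksize*ksize*inc][outh*outw], see im2col_fp32 */
    im2col_fp32(img, col, inh, inw, inc, outh, outw, ksize, ksize, 1, 1, 0, 0, 1, 1);

    for (int h = 0; h < outh; h++)
    {
        for (int w = 0; w < outw; w++)
        {
            /* direct convolution as the reference */
            float ref = 0.f;
            for (int r = 0; r < ksize; r++)
                for (int s = 0; s < ksize; s++)
                    ref += img[(h + r) * inw + (w + s)] * kernel[r * ksize + s];

            /* GEMM view: out[p] = sum_c kernel[c] * col[c][p] */
            const int p = h * outw + w;
            float gemm = 0.f;
            for (int c = 0; c < 9; c++)
                gemm += kernel[c] * col[c * (outh * outw) + p];

            if (fabs(ref - gemm) > 1e-5)
            {
                printf("im2col selftest failed at (%d,%d): %f vs %f\n", h, w, ref, gemm);
                return -1;
            }
        }
    }
    printf("im2col selftest passed\n");
    return 0;
}
#endif /* CONV_KERNEL_X86_SELFTEST */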
serial_tree_learner.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #include <LightGBM/dataset.h> #include <LightGBM/tree.h> #include <LightGBM/tree_learner.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/random.h> #include <string> #include <cmath> #include <cstdio> #include <memory> #include <random> #include <vector> #include "data_partition.hpp" #include "feature_histogram.hpp" #include "leaf_splits.hpp" #include "split_info.hpp" #ifdef USE_GPU // Use a 4 KB-aligned allocator for ordered gradients and ordered hessians when GPU is enabled. // This is necessary to pin the two arrays in memory and make transfers faster. #include <boost/align/aligned_allocator.hpp> #endif using namespace json11; namespace LightGBM { /*! * \brief Used for learning a tree on a single machine */ class SerialTreeLearner: public TreeLearner { public: explicit SerialTreeLearner(const Config* config); ~SerialTreeLearner(); void Init(const Dataset* train_data, bool is_constant_hessian) override; void ResetTrainingData(const Dataset* train_data) override; void ResetConfig(const Config* config) override; Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian, Json& forced_split_json) override; Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override; Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) override; void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override { data_partition_->SetUsedDataIndices(used_indices, num_data); } void AddPredictionToScore(const Tree* tree, double* out_score) const override { if (tree->num_leaves() <= 1) { return; } CHECK(tree->num_leaves() <= data_partition_->num_leaves()); #pragma omp parallel for schedule(static) for (int i = 0; i < tree->num_leaves(); ++i) { double output = static_cast<double>(tree->LeafOutput(i)); data_size_t cnt_leaf_data = 0; auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data); for (data_size_t j = 0; j < cnt_leaf_data; ++j) { out_score[tmp_idx[j]] += output; } } } void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter, data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override; protected: virtual std::vector<int8_t> GetUsedFeatures(); /*! * \brief Some initial work before training */ virtual void BeforeTrain(); /*! * \brief Some initial work before FindBestSplit */ virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf); virtual void FindBestSplits(); virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract); virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract); /*! * \brief Partition tree and data according to the best split. * \param tree Current tree; it will be split by this function. * \param best_leaf The index of the leaf that will be split. * \param left_leaf The index of the left leaf after the split. * \param right_leaf The index of the right leaf after the split. */ virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf); /* Force splits using the forced_split_json dict, then return the number of splits forced. */ virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf, int* right_leaf, int* cur_depth, bool *aborted_last_force_split); /*! * \brief Get the number of data points in a leaf * \param leaf_idx The index of the leaf * \return The number of data points in leaf leaf_idx */ inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const; double CalculateOndemandCosts(int feature_index, int leaf_index); /*! \brief number of data points */ data_size_t num_data_; /*! \brief number of features */ int num_features_; /*! \brief training data */ const Dataset* train_data_; /*! \brief gradients of current iteration */ const score_t* gradients_; /*! \brief hessians of current iteration */ const score_t* hessians_; /*! \brief training data partition on leaves */ std::unique_ptr<DataPartition> data_partition_; /*! \brief used to generate the set of used features */ Random random_; /*! \brief used for sub-feature training; is_feature_used_[i] = false means feature i is not used */ std::vector<int8_t> is_feature_used_; /*! \brief pointer to histograms array of parent of current leaves */ FeatureHistogram* parent_leaf_histogram_array_; /*! \brief pointer to histograms array of smaller leaf */ FeatureHistogram* smaller_leaf_histogram_array_; /*! \brief pointer to histograms array of larger leaf */ FeatureHistogram* larger_leaf_histogram_array_; /*! \brief store best split points for all leaves */ std::vector<SplitInfo> best_split_per_leaf_; /*! \brief store best split per feature for all leaves */ std::vector<SplitInfo> splits_per_leaf_; /*! \brief stores best thresholds for all features for the smaller leaf */ std::unique_ptr<LeafSplits> smaller_leaf_splits_; /*! \brief stores best thresholds for all features for the larger leaf */ std::unique_ptr<LeafSplits> larger_leaf_splits_; std::vector<int> valid_feature_indices_; #ifdef USE_GPU /*! \brief gradients of current iteration, ordered for cache optimization, aligned to a 4 KB page */ std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_; /*! \brief hessians of current iteration, ordered for cache optimization, aligned to a 4 KB page */ std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_; #else /*! \brief gradients of current iteration, ordered for cache optimization */ std::vector<score_t> ordered_gradients_; /*! \brief hessians of current iteration, ordered for cache optimization */ std::vector<score_t> ordered_hessians_; #endif /*! \brief Store ordered bin */ std::vector<std::unique_ptr<OrderedBin>> ordered_bins_; /*! \brief True if it has ordered bins */ bool has_ordered_bin_ = false; /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */ std::vector<char> is_data_in_leaf_; /*! \brief used to cache historical histograms as a speed-up */ HistogramPool histogram_pool_; /*! \brief config of tree learner*/ const Config* config_; int num_threads_; std::vector<int> ordered_bin_indices_; bool is_constant_hessian_; std::vector<bool> feature_used; std::vector<uint32_t> feature_used_in_data; }; inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const { if (leaf_idx >= 0) { return data_partition_->leaf_count(leaf_idx); } else { return 0; } } } // namespace LightGBM #endif // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
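// Editor's note: the sketch below is an illustrative addition for this edit,
// not part of LightGBM; the guard macro and all names in it are invented. It
// shows, in a minimal standalone form, why AddPredictionToScore above can
// parallelize over leaves without atomics: the data partition assigns each
// data index to exactly one leaf, so the per-leaf index lists are disjoint
// and every out_score element is written by exactly one thread.
#ifdef SERIAL_TREE_LEARNER_SKETCH
static void AddLeafOutputsSketch(int num_leaves,
                                 const int* leaf_begin,    // offsets, size num_leaves + 1
                                 const int* leaf_indices,  // concatenated per-leaf data indices
                                 const double* leaf_output,
                                 double* out_score) {
  // Leaves own disjoint index ranges, so no two iterations of the outer loop
  // touch the same out_score slot; the loop needs no synchronization.
  #pragma omp parallel for schedule(static)
  for (int leaf = 0; leaf < num_leaves; ++leaf) {
    for (int j = leaf_begin[leaf]; j < leaf_begin[leaf + 1]; ++j) {
      out_score[leaf_indices[j]] += leaf_output[leaf];
    }
  }
}
#endif  // SERIAL_TREE_LEARNER_SKETCH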
dragonfly3_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * based on rawSHA256_fmt.c code * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See * http://www.openwall.com/lists/john-dev/2012/01/16/1 * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_dragonfly3_32; extern struct fmt_main fmt_dragonfly3_64; #elif FMT_REGISTERS_H john_register_one(&fmt_dragonfly3_32); john_register_one(&fmt_dragonfly3_64); #else #include "sha2.h" #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 4096 // tuned on K8-dual HT #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL_32 "dragonfly3-32" #define FORMAT_LABEL_64 "dragonfly3-64" #define FORMAT_NAME_32 "DragonFly BSD $3$ w/ bug, 32-bit" #define FORMAT_NAME_64 "DragonFly BSD $3$ w/ bug, 64-bit" #define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " " SHA2_LIB #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 44 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define SALT_SIZE_32 (1+4+8) // 1st char is length #define SALT_SIZE_64 (1+8+8) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests_32[] = { {"$3$z$EBG66iBCGfUfENOfqLUH/r9xQxI1cG373/hRop6j.oWs", "magnum"}, {"$3$f6daU5$Xf/u8pKp.sb4VCLKz7tTZMUKJ3J4oOfZgUSHYOFL.M0n", ""}, {"$3$PNPA2tJ$ppD4bXqPMYFVdYVYrxXGMWeYB6Xv8e6jmXbvrB5V.okl", "password"}, {"$3$jWhDSrS$bad..Dy7UAyabPyfrEi3fgQ2qtT.5fE7C5EMNo/n.Qk5", "John the Ripper"}, {"$3$SSYEHO$hkuDmUQHT2Tr0.ai.lUVyb9bCC875Up.CZVa6UJZ.Muv", "DragonFly BSD"}, {"$3$pomO$a2ltqo.LlUSt1DG68sv2FZOdLcul0gYQ3xmn6z0G.I6Y", "123"}, {"$3$F$8Asqp58WwQ3WDMhaR3yQMSJGdCtpBqckemkCSNnJ.gRr", "12345678"}, {NULL} }; static struct fmt_tests tests_64[] = { {"$3$z$sNV7KLtLxvJRsj2MfBtGZFuzXP3CECITaFq/rvsy.Y.Q", "magnum"}, {"$3$f6daU5$eV2SX9vUHTMsoy3Ic7cWiQ4mOxyuyenGjYQWkJmy.AF3", ""}, {"$3$PNPA2tJ$GvXjg6zSge3YDh5I35JlYZHoQS2r0/.vn36fQzSY.A0d", "password"}, {"$3$jWhDSrS$5yBH7KFPmsg.PhPeDMj1MY4fv9061zdbYumPe2Ve.Y5J", "John the Ripper"}, {"$3$SSYEHO$AMYLyanRYs8F2U07FsBrSFuOIygJ4kgqvpBB17BI.61N", "DragonFly BSD"}, {"$3$e$TzMK1ePmjnZI/YbGes/1PAKqbj8aOV31Hf8Tz9es.kkq", "123"}, {"$3$XcMa$idKoaBQXdRlhfJFDjnV0jDryW/nEBAGXONyzJvnH.cR3", "12345678"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out) [(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)]; static char *cur_salt; static int salt_len; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *start; if (strncmp(ciphertext, "$3$", 3)) return 0; ciphertext += 3; for (pos = ciphertext; *pos && *pos != '$'; pos++); if (!*pos || pos < ciphertext || pos > 
&ciphertext[8]) return 0; start = ++pos; while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++; if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0; return 1; } #define TO_BINARY(b1, b2, b3) \ value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out[b1] = value >> 16; \ out[b2] = value >> 8; \ out[b3] = value; static void *get_binary(char *ciphertext) { static ARCH_WORD_32 outbuf[BINARY_SIZE/4]; ARCH_WORD_32 value; char *pos; unsigned char *out = (unsigned char*)outbuf; int i; pos = strrchr(ciphertext, '$') + 1; for (i = 0; i < 10; i++) { TO_BINARY(i, i + 11, i + 21); } value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); out[10] = value >> 16; out[31] = value >> 8; return (void *)out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { SHA256_CTX ctx; SHA256_Init(&ctx); /* First the password */ SHA256_Update(&ctx, saved_key[index], saved_len[index]); /* Then the salt, including the $3$ magic */ SHA256_Update(&ctx, cur_salt, salt_len); SHA256_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static void set_salt(void *salt) { salt_len = (int)*(char*)salt; cur_salt = (char*)salt + 1; } // For 32-bit version of the bug, our magic is "$3$\0" len 4 static void *get_salt_32(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_32); ciphertext += 3; strcpy(&out[1], "$3$"); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[5], ciphertext, len); out[0] = len + 4; return out; } // For 64-bit version of the bug, our magic is "$3$\0sha5" len 8 static void *get_salt_64(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_64); ciphertext += 3; memcpy(&out[1], "$3$\0sha5", 8); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[9], ciphertext, len); out[0] = len + 8; return out; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash 
function by DJ Bernstein static int salt_hash(void *salt) { unsigned char *s = (unsigned char*)salt + 1; unsigned int hash = 5381; unsigned int i; for (i = 0; i < *(unsigned char*)salt; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_dragonfly3_32 = { { FORMAT_LABEL_32, FORMAT_NAME_32, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_32, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD, { NULL }, tests_32 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_32, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_dragonfly3_64 = { { FORMAT_LABEL_64, FORMAT_NAME_64, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_64, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD, { NULL }, tests_64 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_64, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
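/*
 * Editor's note: the demo below is an illustrative addition for this edit,
 * not part of John the Ripper; the guard macro and function name are
 * invented, and it assumes an OpenSSL-compatible legacy SHA256_* API. It
 * reproduces, standalone, what set_salt() + crypt_all() above feed to
 * SHA-256 for the 32-bit flavor of the DragonFly $3$ bug: the password,
 * then the magic "$3$" with the stray NUL byte the broken crypt code hashes,
 * then the salt characters. It prints the raw digest in hex; the on-disk
 * form additionally uses DragonFly's scrambled base-64 layout (see
 * get_binary() above for that layout).
 */
#ifdef DRAGONFLY3_DEMO
#include <openssl/sha.h>

static void dragonfly3_32_digest(const char *password, const char *salt)
{
    unsigned char md[SHA256_DIGEST_LENGTH];
    unsigned char buggy_salt[1 + 4 + 8];   /* mirrors the SALT_SIZE_32 layout */
    size_t len = strlen(salt);             /* valid() guarantees 1..8 chars */
    SHA256_CTX ctx;
    size_t i;

    memcpy(buggy_salt, "$3$\0", 4);        /* "$3$" plus the buggy NUL */
    memcpy(buggy_salt + 4, salt, len);

    SHA256_Init(&ctx);
    SHA256_Update(&ctx, password, strlen(password));
    SHA256_Update(&ctx, buggy_salt, 4 + len);
    SHA256_Final(md, &ctx);

    for (i = 0; i < sizeof(md); i++)
        printf("%02x", md[i]);
    printf("\n");
}
#endif /* DRAGONFLY3_DEMO */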
GB_unaryop__abs_uint32_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint32_uint16 // op(A') function: GB_tran__abs_uint32_uint16 // C type: uint32_t // A type: uint16_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint32_uint16 ( uint32_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
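//------------------------------------------------------------------------------
// Editor's note: the sketch below is an illustrative addition for this edit,
// not part of SuiteSparse:GraphBLAS; the guard macro and function name are
// invented. It shows what the macro layer above expands to for this kernel:
// GB_GETA reads aij, GB_CASTING widens uint16_t to uint32_t, and GB_OP is the
// identity, because "abs" of an unsigned integer is a no-op.
//------------------------------------------------------------------------------

#ifdef GB_UNARYOP_SKETCH
static void GB_unop__abs_uint32_uint16_expanded
(
    uint32_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // GB_GETA: aij = Ax [p] ; GB_CASTING: z = (uint32_t) aij ;
        // GB_OP: Cx [p] = z (identity, since |x| == x for unsigned x)
        Cx [p] = (uint32_t) Ax [p] ;
    }
}
#endif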
level.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <math.h> //------------------------------------------------------------------------------------------------------------------------------ #ifdef USE_MPI #include <mpi.h> #endif #include <omp.h> //------------------------------------------------------------------------------------------------------------------------------ #include "level.h" #include "operators.h" //------------------------------------------------------------------------------------------------------------------------------ #define BOX_ALIGN_1ST_CELL 4096 // Align to TLB page... //------------------------------------------------------------------------------------------------------------------------------ // // / 24 25 26 / // / 21 22 23 / // / 18 19 20 / // // / 15 16 17 / // / 12 13 14 / // / 9 10 11 / // // / 6 7 8 / // / 3 4 5 / // / 0 1 2 / // int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0}; int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0}; int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1}; //------------------------------------------------------------------------------------------------------------------------------ void print_communicator(int printSendRecv, int rank, int level, communicator_type *comm){ int i; printf("rank=%2d level=%d ",rank,level); if(printSendRecv & 0x1){ printf("num_sends=%2d ",comm->num_sends); printf("send_ranks=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_ranks[i]);printf("] "); printf("send_sizes=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_sizes[i]);printf("] "); printf("send_buffers=[ ");for(i=0;i<comm->num_sends;i++)printf("%08lx ",(uint64_t)comm->send_buffers[i]);printf("] "); for(i=0;i<comm->num_blocks[0];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[0][i].dim.i,comm->blocks[0][i].dim.j,comm->blocks[0][i].dim.k,comm->blocks[0][i].read.i,comm->blocks[0][i].read.j,comm->blocks[0][i].read.k,comm->blocks[0][i].read.jStride,comm->blocks[0][i].read.kStride,comm->blocks[0][i].write.i,comm->blocks[0][i].write.j,comm->blocks[0][i].write.k,comm->blocks[0][i].write.jStride,comm->blocks[0][i].write.kStride); printf("\n"); } if(printSendRecv & 0x2){ for(i=0;i<comm->num_blocks[1];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[1][i].dim.i,comm->blocks[1][i].dim.j,comm->blocks[1][i].dim.k,comm->blocks[1][i].read.i,comm->blocks[1][i].read.j,comm->blocks[1][i].read.k,comm->blocks[1][i].read.jStride,comm->blocks[1][i].read.kStride,comm->blocks[1][i].write.i,comm->blocks[1][i].write.j,comm->blocks[1][i].write.k,comm->blocks[1][i].write.jStride,comm->blocks[1][i].write.kStride); printf("\n"); } if(printSendRecv & 0x4){ printf("num_recvs=%2d ",comm->num_recvs); printf("recv_ranks=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_ranks[i]);printf("] "); printf("recv_sizes=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_sizes[i]);printf("] "); printf("recv_buffers=[ ");for(i=0;i<comm->num_recvs;i++)printf("%08lx ",(uint64_t)comm->recv_buffers[i]);printf("] "); for(i=0;i<comm->num_blocks[2];i++)printf("[ 
%dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k,comm->blocks[2][i].read.i,comm->blocks[2][i].read.j,comm->blocks[2][i].read.k,comm->blocks[2][i].read.jStride,comm->blocks[2][i].read.kStride,comm->blocks[2][i].write.i,comm->blocks[2][i].write.j,comm->blocks[2][i].write.k,comm->blocks[2][i].write.jStride,comm->blocks[2][i].write.kStride);
  printf("\n");
  }
  fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
typedef struct {
  int sendRank;
  int sendBoxID;
  int sendBox;
  int sendDir;
  int recvRank;
  int recvBoxID;
  int recvBox;
} GZ_type;

int qsortGZ(const void *a, const void *b){
  GZ_type *gza = (GZ_type*)a;
  GZ_type *gzb = (GZ_type*)b;
  // by convention, MPI buffers are first sorted by sendRank
  if(gza->sendRank  < gzb->sendRank )return(-1);
  if(gza->sendRank  > gzb->sendRank )return( 1);
  // then by sendBoxID
  if(gza->sendBoxID < gzb->sendBoxID)return(-1);
  if(gza->sendBoxID > gzb->sendBoxID)return( 1);
  // and finally by the direction sent
  if(gza->sendDir   < gzb->sendDir  )return(-1);
  if(gza->sendDir   > gzb->sendDir  )return( 1);
  return(0);
}

int qsortInt(const void *a, const void *b){
  int *ia = (int*)a;
  int *ib = (int*)b;
  if(*ia < *ib)return(-1);
  if(*ia > *ib)return( 1);
  return( 0);
}
//------------------------------------------------------------------------------------------------------------------------------
int create_box(box_type *box, int numVectors, int dim, int ghosts){
  uint64_t memory_allocated = 0;
  box->numVectors = numVectors;
  box->dim = dim;
  box->ghosts = ghosts;
  #ifndef BOX_SIMD_ALIGNMENT
  #define BOX_SIMD_ALIGNMENT 1 // alignment requirement for j+/-1
  #endif
  box->jStride = (dim+2*ghosts);while(box->jStride % BOX_SIMD_ALIGNMENT)box->jStride++; // pencil
  #ifndef BOX_PLANE_PADDING
  #define BOX_PLANE_PADDING 8 // scratch space to avoid unrolled loop cleanup
  #endif
  box->kStride = box->jStride*(dim+2*ghosts); // plane
  if(box->jStride<BOX_PLANE_PADDING)box->kStride += (BOX_PLANE_PADDING-box->jStride); // allow the ghost zone to be clobbered...
  while(box->kStride % BOX_SIMD_ALIGNMENT)box->kStride++;
  #if 0
  // pad each plane such that
  //   1. it is greater than a multiple of the maximum unrolling (or vector length)
  //   2. it carries the same alignment as the plane above/below. i.e. if ijk is aligned, ijk+/-plane is aligned
  //int MaxUnrolling =  8; // 2-way SIMD x unroll by 4 =  8/thread
    int MaxUnrolling = 16; // 4-way SIMD x unroll by 4 = 16/thread
  //int MaxUnrolling = 32; // 8-way SIMD x unroll by 4 = 32/thread
  int paddingToAvoidStencilCleanup = 0;
  if(box->jStride < (MaxUnrolling-1)){paddingToAvoidStencilCleanup = (MaxUnrolling-1)-(box->jStride);} // allows partial clobbering of ghost zone
  //box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0xF) & ~0xF; // multiple of 128 bytes
    box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x7) & ~0x7; // multiple of  64 bytes (required for MIC)
  //box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x3) & ~0x3; // multiple of  32 bytes (required for AVX/QPX)
  //box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x1) & ~0x1; // multiple of  16 bytes (required for SSE)
  #endif
  box->volume = (dim+2*ghosts)*box->kStride;
  // allocate an array of pointers to vectors...
  box->vectors = (double **)malloc(box->numVectors*sizeof(double*));
  memory_allocated += box->numVectors*sizeof(double*);
  if((box->numVectors>0)&&(box->vectors==NULL)){printf("malloc failed - create_box/box->vectors\n");fflush(stdout);exit(0);}
  // allocate one aligned, double-precision array and divide it among vectors...
  uint64_t malloc_size = box->volume*box->numVectors*sizeof(double) + BOX_ALIGN_1ST_CELL; // shift pointer by up to 1 TLB page...
  box->vectors_base = (double*)malloc(malloc_size);
  memory_allocated += malloc_size;
  if((box->numVectors>0)&&(box->vectors_base==NULL)){printf("malloc failed - create_box/box->vectors_base\n");fflush(stdout);exit(0);}
  double * tmpbuf = box->vectors_base;
  while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++;} // align first *non-ghost* zone element of first component to page boundary...
  memset(tmpbuf,0,box->volume*box->numVectors*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
  int c;for(c=0;c<box->numVectors;c++){box->vectors[c] = tmpbuf + c*box->volume;}
  // done... return the total amount of memory allocated...
  return(memory_allocated);
}
//------------------------------------------------------------------------------------------------------------------------------
void add_vectors_to_box(box_type *box, int numAdditionalVectors){
  if(numAdditionalVectors<=0)return; // nothing to do
  double *  old_bp = box->vectors_base; // save a pointer to the base pointer for subsequent free...
  double *  old_v0 = box->vectors[0];   // save a pointer to the old FP data...
  double ** old_v  = box->vectors;      // save a pointer to the old array of pointers...
  box->numVectors+=numAdditionalVectors;
  box->vectors = (double **)malloc(box->numVectors*sizeof(double*)); // new array of pointers to vectors
  if((box->numVectors>0)&&(box->vectors==NULL)){printf("malloc failed - add_vectors_to_box/box->vectors\n");fflush(stdout);exit(0);}
  // NOTE !!! realloc() cannot guarantee the same alignment... malloc, align, copy...
  uint64_t malloc_size = box->volume*box->numVectors*sizeof(double) + BOX_ALIGN_1ST_CELL; // shift pointer by up to 1 TLB page...
  box->vectors_base = (double*)malloc(malloc_size);
  if((box->numVectors>0)&&(box->vectors_base==NULL)){printf("malloc failed - add_vectors_to_box/box->vectors_base\n");fflush(stdout);exit(0);}
  double * tmpbuf = box->vectors_base;
  while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++;} // align first *non-ghost* zone element of first component to page boundary...
  memset(tmpbuf,0,box->volume*box->numVectors*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
  memcpy(tmpbuf,old_v0,box->volume*(box->numVectors-numAdditionalVectors)*sizeof(double)); // copy any existing data over...
  int c;for(c=0;c<box->numVectors;c++){box->vectors[c] = tmpbuf + c*box->volume;} // pointer arithmetic...
  free(old_bp); // all vectors were created from one malloc + pointer arithmetic...
  free(old_v ); // free the list of pointers...
}
//------------------------------------------------------------------------------------------------------------------------------
void destroy_box(box_type *box){
  free(box->vectors_base); // single allocate with pointer arithmetic...
free(box->vectors); } //------------------------------------------------------------------------------------------------------------------------------ // should implement a 3D hilbert curve on non pow2 (but cubical) domain sizes //void decompose_level_hilbert(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){ //} //--------------------------------------------------------------------------------------------------------------------------------------------------- void decompose_level_kd_tree(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){ // recursive bisection (or prime-section) of the domain // can lead to imbalance unless the number of processes and number of boxes per process are chosen well #define numPrimes 13 int primes[numPrimes] = {41,37,31,29,23,19,17,13,11,7,5,3,2}; int i,j,k,p,f,ff; // base case, no further recursion... if( (ranks==1)|| ((idim==1)&&(jdim==1)&&(kdim==1)) ){ for(i=ilo;i<ilo+idim;i++){ for(j=jlo;j<jlo+jdim;j++){ for(k=klo;k<klo+kdim;k++){ int b = i + j*jStride + k*kStride; rank_of_box[b] = rank_lo; }}} return; } for(p=0;p<numPrimes;p++){f=primes[p]; // don't partition if the aspect ratio would be extreme if( (1.5*kdim>=idim)&&(1.5*kdim>=jdim) )if( (kdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo,klo+ff*kdim/f,idim,jdim,kdim/f,rank_lo+ff*ranks/f,ranks/f);return;} if( (1.5*jdim>=idim)&&(1.5*jdim>=kdim) )if( (jdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo+ff*jdim/f,klo,idim,jdim/f,kdim,rank_lo+ff*ranks/f,ranks/f);return;} if( (1.5*idim>=jdim)&&(1.5*idim>=kdim) )if( (idim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo+ff*idim/f,jlo,klo,idim/f,jdim,kdim,rank_lo+ff*ranks/f,ranks/f);return;} } // try and bisect the domain in the i-dimension if( (idim>=jdim)&&(idim>=kdim) ){ int dim0 = (int)(0.5*(double)idim + 0.50); int dim1 = idim-dim0; int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)idim ); int r1 = ranks-r0; decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo ,jlo,klo,dim0,jdim,kdim,rank_lo ,r0); // lo decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,rank_lo+r0,r1); // hi return; } // try and bisect the domain in the j-dimension if( (jdim>=idim)&&(jdim>=kdim) ){ int dim0 = (int)(0.5*(double)jdim + 0.50); int dim1 = jdim-dim0; int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)jdim ); int r1 = ranks-r0; decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo ,klo,idim,dim0,kdim,rank_lo ,r0); // lo decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,rank_lo+r0,r1); // hi return; } // try and bisect the domain in the k-dimension if( (kdim>=idim)&&(kdim>=jdim) ){ int dim0 = (int)(0.5*(double)kdim + 0.50); int dim1 = kdim-dim0; int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)kdim ); int r1 = ranks-r0; decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo,klo ,idim,jdim,dim0,rank_lo ,r0); // lo decompose_level_kd_tree(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,rank_lo+r0,r1); // hi return; } printf("decompose_level_kd_tree failed !!!\n"); exit(0); } //--------------------------------------------------------------------------------------------------------------------------------------------------- void print_decomposition(level_type 
*level){ if(level->my_rank!=0)return; printf("\n"); int i,j,k; int jStride = level->boxes_in.i; int kStride = level->boxes_in.i*level->boxes_in.j; for(k=level->boxes_in.k-1;k>=0;k--){ // (i,j,k)=(0,0,0) is bottom left corner for(j=level->boxes_in.j-1;j>=0;j--){ // (i,j)=(0,0) is bottom left corner for(i=0;i<j;i++)printf(" "); for(i=0;i<level->boxes_in.i;i++){ int b = i + j*jStride + k*kStride; printf("%4d ",level->rank_of_box[b]); }printf("\n"); }printf("\n\n"); } fflush(stdout); } //------------------------------------------------------------------------------------------------------------------------------ void append_block_to_list(blockCopy_type ** blocks, int *allocated_blocks, int *num_blocks, int dim_i, int dim_j, int dim_k, int read_box, double* read_ptr, int read_i, int read_j, int read_k, int read_jStride, int read_kStride, int read_scale, int write_box, double* write_ptr, int write_i, int write_j, int write_k, int write_jStride, int write_kStride, int write_scale ){ int jj,kk; // Take a dim_j x dim_k iteration space and tile it into smaller faces of size BLOCKCOPY_TILE_J x BLOCKCOPY_TILE_K // This increases the number of blockCopies in the ghost zone exchange and thereby increases the thread-level parallelism // ghostZone: read_scale=1, write_scale=1 // interpolation: read_scale=1, write_scale=2 // restriction: read_scale=2, write_scale=1 for(kk=0;kk<dim_k;kk+=BLOCKCOPY_TILE_K){ for(jj=0;jj<dim_j;jj+=BLOCKCOPY_TILE_J){ int dim_k_mod = dim_k-kk;if(dim_k_mod>BLOCKCOPY_TILE_K)dim_k_mod=BLOCKCOPY_TILE_K; int dim_j_mod = dim_j-jj;if(dim_j_mod>BLOCKCOPY_TILE_J)dim_j_mod=BLOCKCOPY_TILE_J; if(*num_blocks >= *allocated_blocks){ *allocated_blocks = *allocated_blocks + 100; *blocks = (blockCopy_type *)realloc((void*)(*blocks),(*allocated_blocks)*sizeof(blockCopy_type)); if(*blocks == NULL){printf("realloc failed - append_block_to_list\n");fflush(stdout);exit(0);} } (*blocks)[*num_blocks].dim.i = dim_i; (*blocks)[*num_blocks].dim.j = dim_j_mod; (*blocks)[*num_blocks].dim.k = dim_k_mod; (*blocks)[*num_blocks].read.box = read_box; (*blocks)[*num_blocks].read.ptr = read_ptr; (*blocks)[*num_blocks].read.i = read_i; (*blocks)[*num_blocks].read.j = read_j + read_scale*jj; (*blocks)[*num_blocks].read.k = read_k + read_scale*kk; (*blocks)[*num_blocks].read.jStride = read_jStride; (*blocks)[*num_blocks].read.kStride = read_kStride; (*blocks)[*num_blocks].write.box = write_box; (*blocks)[*num_blocks].write.ptr = write_ptr; (*blocks)[*num_blocks].write.i = write_i; (*blocks)[*num_blocks].write.j = write_j + write_scale*jj; (*blocks)[*num_blocks].write.k = write_k + write_scale*kk; (*blocks)[*num_blocks].write.jStride = write_jStride; (*blocks)[*num_blocks].write.kStride = write_kStride; (*num_blocks)++; }} } //---------------------------------------------------------------------------------------------------------------------------------------------------- // create a mini program that packs data into MPI recv buffers, exchanges local data, and unpacks the MPI send buffers // broadly speaking... // 1. traverse my list of Boxes and create a list of ghosts that must be sent // 2. create a list of neighbors to send to // 3. allocate and populate the pack list and allocate the send buffers // 4. allocate and populate the local exchange list // 5. traverse my list of Boxes and create a list of ghosts that must be received // 6. create a list of neighbors to receive from // 7. allocate and populate the unpack list and allocate the recv buffers // // thus a ghost zone exchange is // 1. 
prepost a Irecv for each MPI recv buffer (1 per neighbor) // 2. traverse the pack list // 3. post the Isends for each MPI send buffer (1 per neighbor) // 4. traverse the local copy list // 5. waitall // 6. traverse the unpack list void build_exchange_ghosts(level_type *level, int justFaces){ level->exchange_ghosts[justFaces].num_recvs = 0; level->exchange_ghosts[justFaces].num_sends = 0; level->exchange_ghosts[justFaces].num_blocks[0] = 0; level->exchange_ghosts[justFaces].num_blocks[1] = 0; level->exchange_ghosts[justFaces].num_blocks[2] = 0; int CommunicateThisDir[27]; int n;for(n=0;n<27;n++)CommunicateThisDir[n]=1;CommunicateThisDir[13]=0; if(justFaces)for(n=0;n<27;n++)CommunicateThisDir[n]=faces[n]; int sendBox,recvBox; int stage; int _rank; int ghost,numGhosts,numGhostsRemote; // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks GZ_type *ghostsToSend = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box. int *sendRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box. if(level->num_my_boxes>0){ if(ghostsToSend == NULL){printf("malloc failed - build_exchange_ghosts/ghostsToSend\n");fflush(stdout);exit(0);} if(sendRanks == NULL){printf("malloc failed - build_exchange_ghosts/sendRanks \n");fflush(stdout);exit(0);} } numGhosts = 0; numGhostsRemote = 0; for(sendBox=0;sendBox<level->num_my_boxes;sendBox++){ int di,dj,dk; for(dk=-1;dk<=1;dk++){ for(dj=-1;dj<=1;dj++){ for(di=-1;di<=1;di++){ int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){ int myBoxID = level->my_boxes[sendBox].global_box_id; int myBox_i = level->my_boxes[sendBox].low.i / level->box_dim; int myBox_j = level->my_boxes[sendBox].low.j / level->box_dim; int myBox_k = level->my_boxes[sendBox].low.k / level->box_dim; int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i; int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j; int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k; int neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j; if( level->rank_of_box[neighborBoxID] != -1 ){ ghostsToSend[numGhosts].sendRank = level->my_rank; ghostsToSend[numGhosts].sendBoxID = myBoxID; ghostsToSend[numGhosts].sendBox = sendBox; ghostsToSend[numGhosts].sendDir = dir; ghostsToSend[numGhosts].recvRank = level->rank_of_box[neighborBoxID]; ghostsToSend[numGhosts].recvBoxID = neighborBoxID; ghostsToSend[numGhosts].recvBox = -1; if( level->rank_of_box[neighborBoxID] != level->my_rank )sendRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];else{ int recvBox=0;while(level->my_boxes[recvBox].global_box_id!=neighborBoxID)recvBox++; // search my list of boxes for the appropriate recvBox index ghostsToSend[numGhosts].recvBox = recvBox; } numGhosts++; } }}}} } // sort boxes by sendRank(==my rank) then by sendBoxID... ensures the sends and receive buffers are always sorted by sendBoxID... qsort(ghostsToSend,numGhosts ,sizeof(GZ_type),qsortGZ ); // sort the lists of neighboring ranks and remove duplicates... 
qsort(sendRanks ,numGhostsRemote,sizeof( int),qsortInt); int numSendRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(sendRanks[ghost] != _rank){_rank=sendRanks[ghost];sendRanks[numSendRanks++]=sendRanks[ghost];} // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // in a two-stage process, traverse the list of ghosts and allocate the pack/local lists as well as the MPI buffers, and then populate the pack/local lists level->exchange_ghosts[justFaces].num_sends = numSendRanks; level->exchange_ghosts[justFaces].send_ranks = (int*)malloc(numSendRanks*sizeof(int)); level->exchange_ghosts[justFaces].send_sizes = (int*)malloc(numSendRanks*sizeof(int)); level->exchange_ghosts[justFaces].send_buffers = (double**)malloc(numSendRanks*sizeof(double*)); if(numSendRanks>0){ if(level->exchange_ghosts[justFaces].send_ranks ==NULL){printf("malloc failed - exchange_ghosts[%d].send_ranks\n",justFaces);fflush(stdout);exit(0);} if(level->exchange_ghosts[justFaces].send_sizes ==NULL){printf("malloc failed - exchange_ghosts[%d].send_sizes\n",justFaces);fflush(stdout);exit(0);} if(level->exchange_ghosts[justFaces].send_buffers==NULL){printf("malloc failed - exchange_ghosts[%d].send_buffers\n",justFaces);fflush(stdout);exit(0);} } level->exchange_ghosts[justFaces].blocks[0] = NULL; level->exchange_ghosts[justFaces].blocks[1] = NULL; level->exchange_ghosts[justFaces].num_blocks[0] = 0; level->exchange_ghosts[justFaces].num_blocks[1] = 0; level->exchange_ghosts[justFaces].allocated_blocks[0] = 0; level->exchange_ghosts[justFaces].allocated_blocks[1] = 0; for(stage=0;stage<=1;stage++){ // stage=0... traverse the list and calculate the buffer sizes // stage=1... allocate MPI send buffers, traverse the list, and populate the unpack/local lists... 
int neighbor; for(neighbor=0;neighbor<numSendRanks;neighbor++){ if(stage==1){ level->exchange_ghosts[justFaces].send_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double)); if(level->exchange_ghosts[justFaces].send_sizes[neighbor]>0) if(level->exchange_ghosts[justFaces].send_buffers[neighbor]==NULL){printf("malloc failed - exchange_ghosts[%d].send_buffers[neighbor]\n",justFaces);fflush(stdout);exit(0);} memset(level->exchange_ghosts[justFaces].send_buffers[neighbor], 0,level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double)); } level->exchange_ghosts[justFaces].send_ranks[neighbor]=sendRanks[neighbor]; level->exchange_ghosts[justFaces].send_sizes[neighbor]=0; } for(ghost=0;ghost<numGhosts;ghost++){ int dim_i,dim_j,dim_k; int send_i,send_j,send_k; int recv_i,recv_j,recv_k; // decode ghostsToSend[ghost].sendDir (direction sent) into di/dj/dk int di = ((ghostsToSend[ghost].sendDir % 3) )-1; int dj = ((ghostsToSend[ghost].sendDir % 9)/3)-1; int dk = ((ghostsToSend[ghost].sendDir / 9) )-1; switch(di){ // direction relative to sender case -1:send_i=0; dim_i=level->box_ghosts;recv_i= level->box_dim; break; case 0:send_i=0; dim_i=level->box_dim; recv_i=0; break; case 1:send_i=level->box_dim-level->box_ghosts;dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break; } switch(dj){ // direction relative to sender case -1:send_j=0; dim_j=level->box_ghosts;recv_j= level->box_dim; break; case 0:send_j=0; dim_j=level->box_dim; recv_j=0; break; case 1:send_j=level->box_dim-level->box_ghosts;dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break; } switch(dk){ // direction relative to sender case -1:send_k=0; dim_k=level->box_ghosts;recv_k= level->box_dim; break; case 0:send_k=0; dim_k=level->box_dim; recv_k=0; break; case 1:send_k=level->box_dim-level->box_ghosts;dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break; } // determine if this ghost requires a pack or local exchange int LocalExchange; // 0 = pack list, 1 = local exchange list if(ghostsToSend[ghost].recvRank != level->my_rank){ LocalExchange=0; // pack neighbor=0;while(level->exchange_ghosts[justFaces].send_ranks[neighbor] != ghostsToSend[ghost].recvRank)neighbor++; }else{ LocalExchange=1; // local neighbor=-1; } if(stage==1){ if(LocalExchange) // append to the local exchange list... append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[1]),&(level->exchange_ghosts[justFaces].allocated_blocks[1]),&(level->exchange_ghosts[justFaces].num_blocks[1]), /* dim.i = */ dim_i, /* dim.j = */ dim_j, /* dim.k = */ dim_k, /* read.box = */ ghostsToSend[ghost].sendBox, /* read.ptr = */ NULL, /* read.i = */ send_i, /* read.j = */ send_j, /* read.k = */ send_k, /* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride, /* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride, /* read.scale = */ 1, /* write.box = */ ghostsToSend[ghost].recvBox, /* write.ptr = */ NULL, /* write.i = */ recv_i, /* write.j = */ recv_j, /* write.k = */ recv_k, /* write.jStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].jStride, /* write.kStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].kStride, /* write.scale = */ 1 ); else // append to the MPI pack list... 
append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[0]),&(level->exchange_ghosts[justFaces].allocated_blocks[0]),&(level->exchange_ghosts[justFaces].num_blocks[0]), /* dim.i = */ dim_i, /* dim.j = */ dim_j, /* dim.k = */ dim_k, /* read.box = */ ghostsToSend[ghost].sendBox, /* read.ptr = */ NULL, /* read.i = */ send_i, /* read.j = */ send_j, /* read.k = */ send_k, /* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride, /* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride, /* read.scale = */ 1, /* write.box = */ -1, /* write.ptr = */ level->exchange_ghosts[justFaces].send_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks /* write.i = */ level->exchange_ghosts[justFaces].send_sizes[neighbor], // current offset in the MPI send buffer /* write.j = */ 0, /* write.k = */ 0, /* write.jStride = */ dim_i, // contiguous block /* write.kStride = */ dim_i*dim_j, // contiguous block /* write.scale = */ 1 );} if(neighbor>=0)level->exchange_ghosts[justFaces].send_sizes[neighbor]+=dim_i*dim_j*dim_k; } // ghost for-loop } // stage for-loop // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // free temporary storage... free(ghostsToSend); free(sendRanks); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks GZ_type *ghostsToRecv = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box. int *recvRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box. if(level->num_my_boxes>0){ if(ghostsToRecv == NULL){printf("malloc failed - build_exchange_ghosts/ghostsToRecv\n");fflush(stdout);exit(0);} if(recvRanks == NULL){printf("malloc failed - build_exchange_ghosts/recvRanks \n");fflush(stdout);exit(0);} } numGhosts = 0; numGhostsRemote = 0; for(recvBox=0;recvBox<level->num_my_boxes;recvBox++){ int di,dj,dk; for(dk=-1;dk<=1;dk++){ for(dj=-1;dj<=1;dj++){ for(di=-1;di<=1;di++){ int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){ int myBoxID = level->my_boxes[recvBox].global_box_id; int myBox_i = level->my_boxes[recvBox].low.i / level->box_dim; int myBox_j = level->my_boxes[recvBox].low.j / level->box_dim; int myBox_k = level->my_boxes[recvBox].low.k / level->box_dim; int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i; int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j; int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k; int neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j; if( (level->rank_of_box[neighborBoxID] != -1) && (level->rank_of_box[neighborBoxID] != level->my_rank) ){ ghostsToRecv[numGhosts].sendRank = level->rank_of_box[neighborBoxID]; ghostsToRecv[numGhosts].sendBoxID = neighborBoxID; ghostsToRecv[numGhosts].sendBox = -1; ghostsToRecv[numGhosts].sendDir = 26-dir; ghostsToRecv[numGhosts].recvRank = level->my_rank; ghostsToRecv[numGhosts].recvBoxID = myBoxID; ghostsToRecv[numGhosts].recvBox = recvBox; numGhosts++; recvRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID]; } }}}} } // sort boxes by sendRank then by sendBoxID... ensures the recvs and receive buffers are always sorted by sendBoxID... 
qsort(ghostsToRecv,numGhosts ,sizeof(GZ_type),qsortGZ ); // sort the lists of neighboring ranks and remove duplicates... qsort(recvRanks ,numGhostsRemote,sizeof( int),qsortInt); int numRecvRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(recvRanks[ghost] != _rank){_rank=recvRanks[ghost];recvRanks[numRecvRanks++]=recvRanks[ghost];} // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // in a two-stage process, traverse the list of ghosts and allocate the unpack lists as well as the MPI buffers, and then populate the unpack list level->exchange_ghosts[justFaces].num_recvs = numRecvRanks; level->exchange_ghosts[justFaces].recv_ranks = (int*)malloc(numRecvRanks*sizeof(int)); level->exchange_ghosts[justFaces].recv_sizes = (int*)malloc(numRecvRanks*sizeof(int)); level->exchange_ghosts[justFaces].recv_buffers = (double**)malloc(numRecvRanks*sizeof(double*)); if(numRecvRanks>0){ if(level->exchange_ghosts[justFaces].recv_ranks ==NULL){printf("malloc failed - exchange_ghosts[%d].recv_ranks\n",justFaces);fflush(stdout);exit(0);} if(level->exchange_ghosts[justFaces].recv_sizes ==NULL){printf("malloc failed - exchange_ghosts[%d].recv_sizes\n",justFaces);fflush(stdout);exit(0);} if(level->exchange_ghosts[justFaces].recv_buffers==NULL){printf("malloc failed - exchange_ghosts[%d].recv_buffers\n",justFaces);fflush(stdout);exit(0);} } level->exchange_ghosts[justFaces].blocks[2] = NULL; level->exchange_ghosts[justFaces].num_blocks[2] = 0; level->exchange_ghosts[justFaces].allocated_blocks[2] = 0; for(stage=0;stage<=1;stage++){ // stage=0... traverse the list and calculate the buffer sizes // stage=1... allocate MPI recv buffers, traverse the list, and populate the unpack/local lists... 
int neighbor; for(neighbor=0;neighbor<numRecvRanks;neighbor++){ if(stage==1){ level->exchange_ghosts[justFaces].recv_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double)); if(level->exchange_ghosts[justFaces].recv_sizes[neighbor]>0) if(level->exchange_ghosts[justFaces].recv_buffers[neighbor]==NULL){printf("malloc failed - exchange_ghosts[%d].recv_buffers[neighbor]\n",justFaces);fflush(stdout);exit(0);} memset(level->exchange_ghosts[justFaces].recv_buffers[neighbor], 0,level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double)); } level->exchange_ghosts[justFaces].recv_ranks[neighbor]=recvRanks[neighbor]; level->exchange_ghosts[justFaces].recv_sizes[neighbor]=0; } for(ghost=0;ghost<numGhosts;ghost++){ int dim_i,dim_j,dim_k; int send_i,send_j,send_k; int recv_i,recv_j,recv_k; // decode ghostsToRecv[ghost].sendDir (direction sent) into di/dj/dk int di = ((ghostsToRecv[ghost].sendDir % 3) )-1; int dj = ((ghostsToRecv[ghost].sendDir % 9)/3)-1; int dk = ((ghostsToRecv[ghost].sendDir / 9) )-1; switch(di){ // direction relative to sender case -1:dim_i=level->box_ghosts;recv_i= level->box_dim; break; case 0:dim_i=level->box_dim; recv_i=0; break; case 1:dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break; } switch(dj){ // direction relative to sender case -1:dim_j=level->box_ghosts;recv_j= level->box_dim; break; case 0:dim_j=level->box_dim; recv_j=0; break; case 1:dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break; } switch(dk){ // direction relative to sender case -1:dim_k=level->box_ghosts;recv_k= level->box_dim; break; case 0:dim_k=level->box_dim; recv_k=0; break; case 1:dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break; } // determine if this ghost requires a pack or local exchange neighbor=0;while(level->exchange_ghosts[justFaces].recv_ranks[neighbor] != ghostsToRecv[ghost].sendRank)neighbor++; if(stage==1)append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[2]),&(level->exchange_ghosts[justFaces].allocated_blocks[2]),&(level->exchange_ghosts[justFaces].num_blocks[2]), /*dim.i = */ dim_i, /*dim.j = */ dim_j, /*dim.k = */ dim_k, /*read.box = */ -1, /*read.ptr = */ level->exchange_ghosts[justFaces].recv_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks /*read.i = */ level->exchange_ghosts[justFaces].recv_sizes[neighbor], // current offset in the MPI recv buffer /*read.j = */ 0, /*read.k = */ 0, /*read.jStride = */ dim_i, // contiguous block /*read.kStride = */ dim_i*dim_j, // contiguous block /*read.scale = */ 1, /*write.box = */ ghostsToRecv[ghost].recvBox, /*write.ptr = */ NULL, /*write.i = */ recv_i, /*write.j = */ recv_j, /*write.k = */ recv_k, /*write.jStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].jStride, /*write.kStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].kStride, /*write.scale = */ 1 ); if(neighbor>=0)level->exchange_ghosts[justFaces].recv_sizes[neighbor]+=dim_i*dim_j*dim_k; } // ghost for-loop } // stage for-loop // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // free temporary storage... 
free(ghostsToRecv); free(recvRanks); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // malloc MPI requests/status arrays #ifdef USE_MPI level->exchange_ghosts[justFaces].requests = (MPI_Request*)malloc((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)*sizeof(MPI_Request)); level->exchange_ghosts[justFaces].status = (MPI_Status *)malloc((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)*sizeof(MPI_Status )); if((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)>0){ if(level->exchange_ghosts[justFaces].requests==NULL){printf("malloc failed - exchange_ghosts[%d].requests\n",justFaces);fflush(stdout);exit(0);} if(level->exchange_ghosts[justFaces].status ==NULL){printf("malloc failed - exchange_ghosts[%d].status\n",justFaces);fflush(stdout);exit(0);} } #endif // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //print_communicator(4,level->my_rank,0,&level->exchange_ghosts[justFaces]); } //--------------------------------------------------------------------------------------------------------------------------------------------------- void create_level(level_type *level, int boxes_in_i, int box_dim, int box_ghosts, int box_vectors, int domain_boundary_condition, int MPI_Rank, int MPI_Tasks){ int box; int TotalBoxes = boxes_in_i*boxes_in_i*boxes_in_i; if(MPI_Rank==0){printf("attempting to create a %d^3 level using a %d^3 grid of %d^3 boxes and %d tasks...\n",box_dim*boxes_in_i,boxes_in_i,box_dim,MPI_Tasks);fflush(stdout);} int omp_threads = 1; int omp_nested = 0; #pragma omp parallel { #pragma omp master { omp_threads = omp_get_num_threads(); omp_nested = omp_get_nested(); } } level->memory_allocated = 0; level->box_dim = box_dim; level->box_ghosts = box_ghosts; level->box_vectors = box_vectors; level->boxes_in.i = boxes_in_i; level->boxes_in.j = boxes_in_i; level->boxes_in.k = boxes_in_i; level->dim.i = box_dim*level->boxes_in.i; level->dim.j = box_dim*level->boxes_in.j; level->dim.k = box_dim*level->boxes_in.k; level->active = 1; level->my_rank = MPI_Rank; level->num_ranks = MPI_Tasks; level->domain_boundary_condition = domain_boundary_condition; level->alpha_is_zero = -1; level->num_threads = omp_threads; // intra-box threading... level->threads_per_box = omp_threads; level->concurrent_boxes = 1; // inter-box threading... //level->threads_per_box = 1; //level->concurrent_boxes = omp_threads; // allocate 3D array of integers to hold the MPI rank of the corresponding box level->rank_of_box = (int*)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int)); if(level->rank_of_box==NULL){printf("malloc of level->rank_of_box failed\n");fflush(stdout);exit(0);} level->memory_allocated += (level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int)); for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){level->rank_of_box[box]=-1;} // -1 denotes that there is no actual box assigned to this region // parallelize the grid... decompose_level_kd_tree(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,MPI_Tasks); //print_decomposition(level);// for debug purposes only // build my list of boxes... 
level->num_my_boxes=0; for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){if(level->rank_of_box[box]==level->my_rank)level->num_my_boxes++;} level->my_boxes = (box_type*)malloc(level->num_my_boxes*sizeof(box_type)); if((level->num_my_boxes>0)&&(level->my_boxes==NULL)){printf("malloc failed - create_level/level->my_boxes\n");fflush(stdout);exit(0);} box=0; int i,j,k; for(k=0;k<level->boxes_in.k;k++){ for(j=0;j<level->boxes_in.j;j++){ for(i=0;i<level->boxes_in.i;i++){ int jStride = level->boxes_in.i; int kStride = level->boxes_in.i*level->boxes_in.j; int b=i + j*jStride + k*kStride; if(level->rank_of_box[b]==level->my_rank){ level->memory_allocated += create_box(&level->my_boxes[box],level->box_vectors,level->box_dim,level->box_ghosts); level->my_boxes[box].low.i = i*level->box_dim; level->my_boxes[box].low.j = j*level->box_dim; level->my_boxes[box].low.k = k*level->box_dim; level->my_boxes[box].global_box_id = b; box++; }}}} initialize_valid_region(level); // define which cells are within the domain // Tune the OpenMP style of parallelism... if(omp_nested){ #ifndef OMP_STENCILS_PER_THREAD #define OMP_STENCILS_PER_THREAD 64 #endif level->concurrent_boxes = level->num_my_boxes; if(level->concurrent_boxes > omp_threads)level->concurrent_boxes = omp_threads; if(level->concurrent_boxes < 1)level->concurrent_boxes = 1; level->threads_per_box = omp_threads / level->concurrent_boxes; if(level->threads_per_box > level->box_dim*level->box_dim) level->threads_per_box = level->box_dim*level->box_dim; // JK collapse if(level->threads_per_box > level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD ) level->threads_per_box = level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD; if(level->threads_per_box<1)level->threads_per_box = 1; }else{ if(level->num_my_boxes>8){level->concurrent_boxes=omp_threads;level->threads_per_box=1;} } if(MPI_Rank==0){ if(omp_nested)printf(" OMP_NESTED=TRUE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box); else printf(" OMP_NESTED=FALSE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box); fflush(stdout); } // build an assist structure for Gauss Seidel Red Black that would facilitate unrolling and SIMDization... if(level->num_my_boxes){ int i,j; int kStride = level->my_boxes[0].kStride; int jStride = level->my_boxes[0].jStride; //posix_memalign((void**)&(level->RedBlack_FP[0] ),64,kStride*sizeof(double )); // even planes //posix_memalign((void**)&(level->RedBlack_FP[1] ),64,kStride*sizeof(double )); // FIX... align RedBlack_FP the same as elements within a plane (i.e. 
BOX_SIMD_ALIGNMENT)
    level->RedBlack_FP[0] = (double*)malloc(kStride*sizeof(double));
    level->RedBlack_FP[1] = (double*)malloc(kStride*sizeof(double));
    memset(level->RedBlack_FP[0],0,kStride*sizeof(double));
    memset(level->RedBlack_FP[1],0,kStride*sizeof(double));
    level->memory_allocated += kStride*sizeof(double);
    level->memory_allocated += kStride*sizeof(double);
    for(j=0-level->box_ghosts;j<level->box_dim+level->box_ghosts;j++){
    for(i=0-level->box_ghosts;i<level->box_dim+level->box_ghosts;i++){
      int ij = (i+level->box_ghosts) + (j+level->box_ghosts)*jStride;
      // if((i^j)&0x1)level->RedBlack_64bMask[ij]= ~0;else level->RedBlack_64bMask[ij]= 0; // useful for blend instructions
      if((i^j^1)&0x1)level->RedBlack_FP[ 0][ij]=1.0;else level->RedBlack_FP[ 0][ij]=0.0;
      if((i^j^1)&0x1)level->RedBlack_FP[ 1][ij]=0.0;else level->RedBlack_FP[ 1][ij]=1.0;
    }}
  }

  // create mini programs that perform the ghost zone exchanges
  for(i=0;i<2;i++){
    level->exchange_ghosts[i].num_recvs    =0;
    level->exchange_ghosts[i].num_sends    =0;
    level->exchange_ghosts[i].num_blocks[0]=0;
    level->exchange_ghosts[i].num_blocks[1]=0;
    level->exchange_ghosts[i].num_blocks[2]=0;
  }
  build_exchange_ghosts(level,0); // faces, edges, corners
  build_exchange_ghosts(level,1); // justFaces

  // duplicate MPI_COMM_WORLD to be the communicator for each level
  #ifdef USE_MPI
  if(MPI_Rank==0){printf(" Duplicating MPI_COMM_WORLD...");fflush(stdout);}
  double time_start = MPI_Wtime();
  MPI_Comm_dup(MPI_COMM_WORLD,&level->MPI_COMM_ALLREDUCE);
  double time_end = MPI_Wtime();
  double time_in_comm_dup = 0;
  double time_in_comm_dup_send = time_end-time_start;
  MPI_Allreduce(&time_in_comm_dup_send,&time_in_comm_dup,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
  if(MPI_Rank==0){printf("done (%0.6f seconds)\n",time_in_comm_dup);fflush(stdout);}
  #endif

  // report on potential load imbalance
  uint64_t BoxesPerProcess = level->num_my_boxes;
  #ifdef USE_MPI
  uint64_t BoxesPerProcessSend = level->num_my_boxes;
  MPI_Allreduce(&BoxesPerProcessSend,&BoxesPerProcess,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD); // datatype must match the uint64_t buffers
  #endif
  if(MPI_Rank==0){printf(" Calculating boxes per process... target=%0.3f, max=%llu\n\n",(double)TotalBoxes/(double)MPI_Tasks,(unsigned long long)BoxesPerProcess);fflush(stdout);}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void reset_level_timers(level_type *level){
  // cycle counters information...
  level->cycles.smooth               = 0;
  level->cycles.apply_op             = 0;
  level->cycles.residual             = 0;
  level->cycles.blas1                = 0;
  level->cycles.blas3                = 0;
  level->cycles.boundary_conditions  = 0;
  level->cycles.restriction_total    = 0;
  level->cycles.restriction_pack     = 0;
  level->cycles.restriction_local    = 0;
  level->cycles.restriction_unpack   = 0;
  level->cycles.restriction_recv     = 0;
  level->cycles.restriction_send     = 0;
  level->cycles.restriction_wait     = 0;
  level->cycles.interpolation_total  = 0;
  level->cycles.interpolation_pack   = 0;
  level->cycles.interpolation_local  = 0;
  level->cycles.interpolation_unpack = 0;
  level->cycles.interpolation_recv   = 0;
  level->cycles.interpolation_send   = 0;
  level->cycles.interpolation_wait   = 0;
  level->cycles.ghostZone_total      = 0;
  level->cycles.ghostZone_pack       = 0;
  level->cycles.ghostZone_local      = 0;
  level->cycles.ghostZone_unpack     = 0;
  level->cycles.ghostZone_recv       = 0;
  level->cycles.ghostZone_send       = 0;
  level->cycles.ghostZone_wait       = 0;
  level->cycles.collectives          = 0;
  level->cycles.Total                = 0;
  // solver events information...
  level->Krylov_iterations        = 0;
  level->CAKrylov_formations_of_G = 0;
  level->vcycles_from_this_level  = 0;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void destroy_level(level_type *level){
  int box;
  for(box=0;box<level->num_my_boxes;box++)destroy_box(&level->my_boxes[box]);
  free(level->rank_of_box);
}
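
// A minimal sketch (not part of level.c) of how a cell (i,j,k) of a box built
// by create_box() above is addressed: jStride/kStride already include the
// 2*ghosts ghost cells plus any SIMD/plane padding, and every index is offset
// by ghosts so that (0,0,0) is the first interior cell.
static inline int demo_cell_offset(int i, int j, int k,
                                   int ghosts, int jStride, int kStride){
  return (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
}
// e.g. demo_cell_offset(0,0,0,...) = ghosts*(1+jStride+kStride), exactly the
// element create_box() aligns to the BOX_ALIGN_1ST_CELL (4 KiB) boundary.

// A second sketch: the six-step ghost zone exchange that the comment above
// build_exchange_ghosts() describes, written against the communicator_type
// fields shown in print_communicator(). The pack/local/unpack traversals of
// blocks[0..2] are elided, and the message tag (0) is an assumption; this is
// an illustration of the protocol, not the real exchange routine.
#ifdef USE_MPI
static void demo_exchange(communicator_type *comm, MPI_Comm mpi_comm){
  int n;
  MPI_Request *req = comm->requests;
  // 1. prepost an Irecv for each MPI recv buffer (one per neighbor)
  for(n=0;n<comm->num_recvs;n++)
    MPI_Irecv(comm->recv_buffers[n],comm->recv_sizes[n],MPI_DOUBLE,comm->recv_ranks[n],0,mpi_comm,&req[n]);
  // 2. traverse the pack list (blocks[0]) to fill the send buffers... elided
  // 3. post an Isend for each MPI send buffer (one per neighbor)
  for(n=0;n<comm->num_sends;n++)
    MPI_Isend(comm->send_buffers[n],comm->send_sizes[n],MPI_DOUBLE,comm->send_ranks[n],0,mpi_comm,&req[comm->num_recvs+n]);
  // 4. traverse the local copy list (blocks[1])... elided
  // 5. wait for all sends and receives to complete
  MPI_Waitall(comm->num_recvs+comm->num_sends,req,comm->status);
  // 6. traverse the unpack list (blocks[2])... elided
}
#endif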
H2Pack_build.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <time.h> #include <omp.h> #include "H2Pack_config.h" #include "H2Pack_typedef.h" #include "H2Pack_aux_structs.h" #include "H2Pack_ID_compress.h" #include "H2Pack_build.h" #include "H2Pack_utils.h" #include "DAG_task_queue.h" #include "utils.h" typedef enum { U_BUILD_KRNL_TIMER_IDX = 0, U_BUILD_QR_TIMER_IDX, U_BUILD_OTHER_TIMER_IDX, U_BUILD_RANDN_TIMER_IDX, U_BUILD_GEMM_TIMER_IDX } u_build_timer_idx_t; // Build H2 projection matrices using proxy points // Input parameter: // h2pack : H2Pack structure with point partitioning info // Output parameter: // h2pack : H2Pack structure with H2 projection matrices void H2P_build_H2_UJ_proxy(H2Pack_p h2pack) { int pt_dim = h2pack->pt_dim; int xpt_dim = h2pack->xpt_dim; int krnl_dim = h2pack->krnl_dim; int n_node = h2pack->n_node; int n_point = h2pack->n_point; int n_thread = h2pack->n_thread; int max_child = h2pack->max_child; int stop_type = h2pack->QR_stop_type; int *children = h2pack->children; int *n_child = h2pack->n_child; int *node_level = h2pack->node_level; int *node_height = h2pack->node_height; int *pt_cluster = h2pack->pt_cluster; DTYPE *coord = h2pack->coord; DTYPE *enbox = h2pack->enbox; size_t *mat_size = h2pack->mat_size; void *krnl_param = h2pack->krnl_param; H2P_dense_mat_p *pp = h2pack->pp; H2P_thread_buf_p *thread_buf = h2pack->tb; kernel_eval_fptr krnl_eval = h2pack->krnl_eval; DAG_task_queue_p upward_tq = h2pack->upward_tq; void *stop_param = NULL; if (stop_type == QR_RANK) stop_param = &h2pack->QR_stop_rank; if ((stop_type == QR_REL_NRM) || (stop_type == QR_ABS_NRM)) stop_param = &h2pack->QR_stop_tol; // 1. Allocate U and J h2pack->n_UJ = n_node; h2pack->U = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node); h2pack->J = (H2P_int_vec_p*) malloc(sizeof(H2P_int_vec_p) * n_node); h2pack->J_coord = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node); ASSERT_PRINTF(h2pack->U != NULL, "Failed to allocate %d U matrices\n", n_node); ASSERT_PRINTF(h2pack->J != NULL, "Failed to allocate %d J matrices\n", n_node); ASSERT_PRINTF(h2pack->J_coord != NULL, "Failed to allocate %d J_coord auxiliary matrices\n", n_node); for (int i = 0; i < h2pack->n_UJ; i++) { h2pack->U[i] = NULL; h2pack->J[i] = NULL; h2pack->J_coord[i] = NULL; } H2P_dense_mat_p *U = h2pack->U; H2P_int_vec_p *J = h2pack->J; H2P_dense_mat_p *J_coord = h2pack->J_coord; // 2. Construct U for nodes whose level is not smaller than min_adm_level. // min_adm_level is the highest level that still has admissible blocks. 
#pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); H2P_dense_mat_p A_block = thread_buf[tid]->mat0; H2P_dense_mat_p node_skel_coord = thread_buf[tid]->mat1; H2P_dense_mat_p QR_buff = thread_buf[tid]->mat2; H2P_int_vec_p sub_idx = thread_buf[tid]->idx0; H2P_int_vec_p ID_buff = thread_buf[tid]->idx1; thread_buf[tid]->timer = -get_wtime_sec(); int node = DAG_task_queue_get_task(upward_tq); while (node != -1) { int height = node_height[node]; int level = node_level[node]; // (1) Update row indices associated with clusters for current node if (height == 0) { // Leaf nodes, use all points int pt_s = pt_cluster[node * 2]; int pt_e = pt_cluster[node * 2 + 1]; int node_npt = pt_e - pt_s + 1; H2P_int_vec_init(&J[node], node_npt); for (int k = 0; k < node_npt; k++) J[node]->data[k] = pt_s + k; J[node]->length = node_npt; H2P_dense_mat_init(&J_coord[node], xpt_dim, node_npt); copy_matrix_block(sizeof(DTYPE), xpt_dim, node_npt, coord + pt_s, n_point, J_coord[node]->data, node_npt); } else { // Non-leaf nodes, gather row indices from children nodes int n_child_node = n_child[node]; int *child_nodes = children + node * max_child; int J_child_size = 0; for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; J_child_size += J[i_child_node]->length; } H2P_int_vec_init(&J[node], J_child_size); for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; H2P_int_vec_concatenate(J[node], J[i_child_node]); } } // End of "if (height == 0)" // (2) Gather current node's skeleton points (== all children nodes' skeleton points) H2P_dense_mat_resize(node_skel_coord, xpt_dim, J[node]->length); if (height == 0) { node_skel_coord = J_coord[node]; } else { int n_child_node = n_child[node]; int *child_nodes = children + node * max_child; int J_child_size = 0; for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; int src_ld = J_coord[i_child_node]->ncol; int dst_ld = node_skel_coord->ncol; DTYPE *src_mat = J_coord[i_child_node]->data; DTYPE *dst_mat = node_skel_coord->data + J_child_size; copy_matrix_block(sizeof(DTYPE), xpt_dim, src_ld, src_mat, src_ld, dst_mat, dst_ld); J_child_size += J[i_child_node]->length; } } // End of "if (level == 0)" // (3) Shift current node's skeleton points so their center is at the original point DTYPE *node_box = enbox + node * 2 * pt_dim; int node_skel_npt = J[node]->length; int node_pp_npt = pp[level]->ncol; for (int k = 0; k < pt_dim; k++) { DTYPE box_center_k = node_box[k] + 0.5 * node_box[pt_dim + k]; DTYPE *node_skel_coord_k = node_skel_coord->data + k * node_skel_npt; #pragma omp simd for (int l = 0; l < node_skel_npt; l++) node_skel_coord_k[l] -= box_center_k; } // (4) Build the kernel matrix block int A_blk_nrow = node_skel_npt * krnl_dim; int A_blk_ncol = node_pp_npt * krnl_dim; H2P_dense_mat_resize(A_block, A_blk_nrow, A_blk_ncol); krnl_eval( node_skel_coord->data, node_skel_npt, node_skel_npt, pp[level]->data, node_pp_npt, node_pp_npt, krnl_param, A_block->data, A_block->ld ); #ifdef H2_UJ_BUILD_RANDOMIZE // It seems that this part makes the calculation slower instead of faster if (A_blk_ncol > 2 * A_blk_nrow && krnl_dim >= 3) { H2P_dense_mat_p A_block1 = node_skel_coord; H2P_dense_mat_p rand_mat = QR_buff; H2P_dense_mat_resize(A_block1, A_blk_nrow, A_blk_nrow); H2P_dense_mat_resize(rand_mat, A_blk_ncol, A_blk_nrow); H2P_gen_normal_distribution(0.0, 1.0, A_blk_ncol * A_blk_nrow, rand_mat->data); CBLAS_GEMM( CblasRowMajor, 
CblasNoTrans, CblasNoTrans, A_blk_nrow, A_blk_nrow, A_blk_ncol, 1.0, A_block->data, A_block->ld, rand_mat->data, rand_mat->ld, 0.0, A_block1->data, A_block1->ld ); H2P_dense_mat_resize(A_block, A_blk_nrow, A_blk_nrow); copy_matrix_block(sizeof(DTYPE), A_blk_nrow, A_blk_nrow, A_block1->data, A_block1->ld, A_block->data, A_block->ld); H2P_dense_mat_normalize_columns(A_block, A_block1); } #endif // (5) ID compress // Note: A is transposed in ID compress, be careful when calculating the buffer size if (krnl_dim == 1) { H2P_dense_mat_resize(QR_buff, A_block->nrow, 1); } else { int QR_buff_size = (2 * krnl_dim + 2) * A_block->ncol + (krnl_dim + 1) * A_block->nrow; H2P_dense_mat_resize(QR_buff, QR_buff_size, 1); } H2P_int_vec_set_capacity(ID_buff, 4 * A_block->nrow); H2P_ID_compress( A_block, stop_type, stop_param, &U[node], sub_idx, 1, QR_buff->data, ID_buff->data, krnl_dim ); // (6) Choose the skeleton points of this node for (int k = 0; k < sub_idx->length; k++) J[node]->data[k] = J[node]->data[sub_idx->data[k]]; J[node]->length = sub_idx->length; H2P_dense_mat_init(&J_coord[node], xpt_dim, sub_idx->length); H2P_gather_matrix_columns( coord, n_point, J_coord[node]->data, J[node]->length, xpt_dim, J[node]->data, J[node]->length ); // (7) Tell DAG_task_queue that this node is finished, and get next available node DAG_task_queue_finish_task(upward_tq, node); node = DAG_task_queue_get_task(upward_tq); } // End of "while (node != -1)" thread_buf[tid]->timer += get_wtime_sec(); } // End of "#pragma omp parallel num_thread(n_thread)" if (h2pack->print_timers == 1) { double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0; for (int i = 0; i < n_thread; i++) { double thread_i_timer = thread_buf[i]->timer; avg_t += thread_i_timer; max_t = MAX(max_t, thread_i_timer); min_t = MIN(min_t, thread_i_timer); } avg_t /= (double) n_thread; INFO_PRINTF("Build U: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t); } // 3. 
Initialize other not touched U J & add statistic info for (int i = 0; i < h2pack->n_UJ; i++) { if (U[i] == NULL) { H2P_dense_mat_init(&U[i], 1, 1); U[i]->nrow = 0; U[i]->ncol = 0; U[i]->ld = 0; } else { mat_size[U_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_FWD_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_FWD_SIZE_IDX] += U[i]->nrow + U[i]->ncol; mat_size[MV_BWD_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_BWD_SIZE_IDX] += U[i]->nrow + U[i]->ncol; } if (J[i] == NULL) H2P_int_vec_init(&J[i], 1); if (J_coord[i] == NULL) { H2P_dense_mat_init(&J_coord[i], 1, 1); J_coord[i]->nrow = 0; J_coord[i]->ncol = 0; J_coord[i]->ld = 0; } } // End of "for (int i = 0; i < h2pack->n_UJ; i++)" for (int i = 0; i < n_thread; i++) H2P_thread_buf_reset(thread_buf[i]); BLAS_SET_NUM_THREADS(n_thread); } // Build HSS projection matrices using proxy points and skeleton points from H2 inadmissible nodes // Input parameter: // h2pack : H2Pack structure with point partitioning info // Output parameter: // h2pack : H2Pack structure with HSS projection matrices void H2P_build_HSS_UJ_hybrid(H2Pack_p h2pack) { int pt_dim = h2pack->pt_dim; int xpt_dim = h2pack->xpt_dim; int max_neighbor = h2pack->max_neighbor; int krnl_dim = h2pack->krnl_dim; int n_node = h2pack->n_node; int n_point = h2pack->n_point; int n_leaf_node = h2pack->n_leaf_node; int n_thread = h2pack->n_thread; int max_child = h2pack->max_child; int max_level = h2pack->max_level; int min_adm_level = h2pack->HSS_min_adm_level; int stop_type = h2pack->QR_stop_type; int *children = h2pack->children; int *n_child = h2pack->n_child; int *node_height = h2pack->node_height; int *level_n_node = h2pack->level_n_node; int *level_nodes = h2pack->level_nodes; int *leaf_nodes = h2pack->height_nodes; int *node_level = h2pack->node_level; int *pt_cluster = h2pack->pt_cluster; int *node_n_r_inadm = h2pack->node_n_r_inadm; int *node_inadm_lists = h2pack->node_inadm_lists; DTYPE *coord = h2pack->coord; DTYPE *enbox = h2pack->enbox; size_t *mat_size = h2pack->mat_size; void *krnl_param = h2pack->krnl_param; H2P_dense_mat_p *pp = h2pack->pp; H2P_thread_buf_p *thread_buf = h2pack->tb; kernel_eval_fptr krnl_eval = h2pack->krnl_eval; void *stop_param = NULL; if (stop_type == QR_RANK) stop_param = &h2pack->QR_stop_rank; if ((stop_type == QR_REL_NRM) || (stop_type == QR_ABS_NRM)) stop_param = &h2pack->QR_stop_tol; // 1. Allocate U and J h2pack->n_UJ = n_node; h2pack->U = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node); h2pack->J = (H2P_int_vec_p*) malloc(sizeof(H2P_int_vec_p) * n_node); h2pack->J_coord = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node); ASSERT_PRINTF(h2pack->U != NULL, "Failed to allocate %d U matrices\n", n_node); ASSERT_PRINTF(h2pack->J != NULL, "Failed to allocate %d J matrices\n", n_node); ASSERT_PRINTF(h2pack->J_coord != NULL, "Failed to allocate %d J_coord auxiliary matrices\n", n_node); for (int i = 0; i < h2pack->n_UJ; i++) { h2pack->U[i] = NULL; h2pack->J[i] = NULL; h2pack->J_coord[i] = NULL; } H2P_dense_mat_p *U = h2pack->U; H2P_int_vec_p *J = h2pack->J; H2P_dense_mat_p *J_coord = h2pack->J_coord; double *U_timers = (double*) malloc(sizeof(double) * n_thread * 8); // 2. 
Initialize the row indices for leaf nodes: all points in that box for (int j = 0; j < n_leaf_node; j++) { int node = leaf_nodes[j]; int pt_s = pt_cluster[node * 2]; int pt_e = pt_cluster[node * 2 + 1]; int node_npt = pt_e - pt_s + 1; H2P_int_vec_init(&J[node], node_npt); for (int k = 0; k < node_npt; k++) J[node]->data[k] = pt_s + k; J[node]->length = node_npt; H2P_dense_mat_init(&J_coord[node], xpt_dim, node_npt); copy_matrix_block(sizeof(DTYPE), xpt_dim, node_npt, coord + pt_s, n_point, J_coord[node]->data, node_npt); } // 3. Hierarchical construction level by level. min_adm_level is the // highest level that still has admissible blocks. for (int i = max_level; i >= min_adm_level; i--) { int *level_i_nodes = level_nodes + i * n_leaf_node; int level_i_n_node = level_n_node[i]; int n_thread_i = MIN(level_i_n_node, n_thread); int level = i; // (1) Update row indices associated with clusters at level i #pragma omp parallel num_threads(n_thread_i) { int tid = omp_get_thread_num(); thread_buf[tid]->timer = -get_wtime_sec(); #pragma omp for schedule(dynamic) nowait for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; int n_child_node = n_child[node]; if (n_child_node == 0) continue; // J[node] has already been prepared for leaf node int *child_nodes = children + node * max_child; int J_child_size = 0; for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; J_child_size += J[i_child_node]->length; } H2P_int_vec_init(&J[node], J_child_size); for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; H2P_int_vec_concatenate(J[node], J[i_child_node]); } } thread_buf[tid]->timer += get_wtime_sec(); } #pragma omp parallel num_threads(n_thread_i) { int tid = omp_get_thread_num(); H2P_int_vec_p inadm_skel_idx = thread_buf[tid]->idx0; H2P_int_vec_p sub_idx = thread_buf[tid]->idx0; H2P_int_vec_p ID_buff = thread_buf[tid]->idx1; H2P_dense_mat_p node_skel_coord = thread_buf[tid]->mat0; H2P_dense_mat_p inadm_skel_coord = thread_buf[tid]->mat1; H2P_dense_mat_p A_block = thread_buf[tid]->mat2; H2P_dense_mat_p QR_buff = thread_buf[tid]->mat1; double st, et, krnl_t = 0.0, randn_t = 0.0, gemm_t = 0.0, QR_t = 0.0, other_t = 0.0; thread_buf[tid]->timer -= get_wtime_sec(); #pragma omp for schedule(dynamic) nowait for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; int height = node_height[node]; // (2) Calculate the number of skeleton points from all inadmissible nodes st = get_wtime_sec(); int n_r_inadm = node_n_r_inadm[node]; int inadm_skel_npt = 0; inadm_skel_idx->length = 0; if (n_r_inadm > 0) { int *inadm_list = node_inadm_lists + node * max_neighbor; for (int k = 0; k < n_r_inadm; k++) { int inadm_node_k = inadm_list[k]; ASSERT_PRINTF( J[inadm_node_k] != NULL, "Node %3d (level %2d): inadmissible node %3d (level %2d) skeleton point not ready!\n", node, level, inadm_node_k, node_level[k] ); H2P_int_vec_concatenate(inadm_skel_idx, J[inadm_node_k]); } inadm_skel_npt = inadm_skel_idx->length; } et = get_wtime_sec(); other_t += et - st; // (3) Gather current node's skeleton points (== all children nodes' skeleton points) st = get_wtime_sec(); H2P_dense_mat_resize(node_skel_coord, xpt_dim, J[node]->length); if (height == 0) { node_skel_coord = J_coord[node]; } else { int n_child_node = n_child[node]; int *child_nodes = children + node * max_child; int J_child_size = 0; for (int i_child = 0; i_child < n_child_node; i_child++) { int i_child_node = child_nodes[i_child]; int src_ld = 
J_coord[i_child_node]->ncol; int dst_ld = node_skel_coord->ncol; DTYPE *src_mat = J_coord[i_child_node]->data; DTYPE *dst_mat = node_skel_coord->data + J_child_size; copy_matrix_block(sizeof(DTYPE), xpt_dim, src_ld, src_mat, src_ld, dst_mat, dst_ld); J_child_size += J[i_child_node]->length; } } et = get_wtime_sec(); other_t += et - st; // (4) Shift points in this node so their center is at the original point st = get_wtime_sec(); DTYPE *node_box = enbox + node * 2 * pt_dim; int node_skel_npt = J[node]->length; int node_pp_npt = pp[level]->ncol; for (int k = 0; k < pt_dim; k++) { DTYPE box_center_k = node_box[k] + 0.5 * node_box[pt_dim + k]; DTYPE *node_skel_coord_k = node_skel_coord->data + k * node_skel_npt; #pragma omp simd for (int l = 0; l < node_skel_npt; l++) node_skel_coord_k[l] -= box_center_k; } et = get_wtime_sec(); other_t += et - st; // (5) Gather skeleton points from all inadmissible nodes and shift their centers to // the original point. We also put proxy points into inadm_skel_coord. st = get_wtime_sec(); H2P_dense_mat_resize(inadm_skel_coord, xpt_dim, inadm_skel_npt + node_pp_npt); if (inadm_skel_npt > 0) { for (int k = 0; k < pt_dim; k++) { DTYPE box_center_k = node_box[k] + 0.5 * node_box[pt_dim + k]; DTYPE *coord_k = coord + k * n_point; DTYPE *inadm_skel_coord_k = inadm_skel_coord->data + k * inadm_skel_coord->ld; #pragma omp simd for (int l = 0; l < inadm_skel_npt; l++) inadm_skel_coord_k[l] = coord_k[inadm_skel_idx->data[l]] - box_center_k; } if (xpt_dim > pt_dim) { for (int k = pt_dim; k < xpt_dim; k++) { DTYPE *coord_k = coord + k * n_point; DTYPE *inadm_skel_coord_k = inadm_skel_coord->data + k * inadm_skel_coord->ld; #pragma omp simd for (int l = 0; l < inadm_skel_npt; l++) inadm_skel_coord_k[l] = coord_k[inadm_skel_idx->data[l]]; } } } if (node_pp_npt > 0) { copy_matrix_block( sizeof(DTYPE), xpt_dim, node_pp_npt, pp[level]->data, pp[level]->ld, inadm_skel_coord->data + inadm_skel_npt, inadm_skel_coord->ld ); } et = get_wtime_sec(); other_t += et - st; // (6) Build the hybrid matrix block st = get_wtime_sec(); int A_blk_nrow = node_skel_npt * krnl_dim; int A_blk_ncol = inadm_skel_coord->ncol * krnl_dim; H2P_dense_mat_resize(A_block, A_blk_nrow, A_blk_ncol); krnl_eval( node_skel_coord->data, node_skel_coord->ncol, node_skel_coord->ld, inadm_skel_coord->data, inadm_skel_coord->ncol, inadm_skel_coord->ld, krnl_param, A_block->data, A_block->ld ); //H2P_dense_mat_normalize_columns(A_block, inadm_skel_coord); et = get_wtime_sec(); krnl_t += et - st; if (A_blk_ncol > 2 * A_blk_nrow) { st = get_wtime_sec(); H2P_dense_mat_p A_block1 = node_skel_coord; H2P_dense_mat_p rand_mat = inadm_skel_coord; H2P_dense_mat_resize(A_block1, A_blk_nrow, A_blk_nrow); H2P_dense_mat_resize(rand_mat, A_blk_ncol, A_blk_nrow); H2P_gen_normal_distribution(0.0, 1.0, A_blk_ncol * A_blk_nrow, rand_mat->data); et = get_wtime_sec(); randn_t += et - st; st = get_wtime_sec(); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, A_blk_nrow, A_blk_nrow, A_blk_ncol, 1.0, A_block->data, A_block->ld, rand_mat->data, rand_mat->ld, 0.0, A_block1->data, A_block1->ld ); H2P_dense_mat_resize(A_block, A_blk_nrow, A_blk_nrow); copy_matrix_block(sizeof(DTYPE), A_blk_nrow, A_blk_nrow, A_block1->data, A_block1->ld, A_block->data, A_block->ld); et = get_wtime_sec(); gemm_t += et - st; } // (7) ID compress // Note: A is transposed in ID compress, be careful when calculating the buffer size st = get_wtime_sec(); if (krnl_dim == 1) { H2P_dense_mat_resize(QR_buff, A_block->nrow, 1); } else { int QR_buff_size = (2 * 
krnl_dim + 2) * A_block->ncol + (krnl_dim + 1) * A_block->nrow; H2P_dense_mat_resize(QR_buff, QR_buff_size, 1); } H2P_int_vec_set_capacity(ID_buff, 4 * A_block->nrow); H2P_ID_compress( A_block, stop_type, stop_param, &U[node], sub_idx, 1, QR_buff->data, ID_buff->data, krnl_dim ); et = get_wtime_sec(); QR_t += et - st; // (8) Choose the skeleton points of this node st = get_wtime_sec(); for (int k = 0; k < sub_idx->length; k++) J[node]->data[k] = J[node]->data[sub_idx->data[k]]; J[node]->length = sub_idx->length; H2P_dense_mat_init(&J_coord[node], xpt_dim, sub_idx->length); H2P_gather_matrix_columns( coord, n_point, J_coord[node]->data, J[node]->length, xpt_dim, J[node]->data, J[node]->length ); et = get_wtime_sec(); other_t += et - st; } // End of j loop thread_buf[tid]->timer += get_wtime_sec(); double *timers = U_timers + tid * 8; timers[U_BUILD_KRNL_TIMER_IDX] = krnl_t; timers[U_BUILD_RANDN_TIMER_IDX] = randn_t; timers[U_BUILD_GEMM_TIMER_IDX] = gemm_t; timers[U_BUILD_QR_TIMER_IDX] = QR_t; timers[U_BUILD_OTHER_TIMER_IDX] = other_t; } // End of "pragma omp parallel" if (h2pack->print_timers == 1) { double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0; for (int i = 0; i < n_thread_i; i++) { double thread_i_timer = thread_buf[i]->timer; avg_t += thread_i_timer; max_t = MAX(max_t, thread_i_timer); min_t = MIN(min_t, thread_i_timer); } avg_t /= (double) n_thread_i; INFO_PRINTF("Build U: level %d, %d/%d threads, %d nodes\n", i, n_thread_i, n_thread, level_n_node[i]); INFO_PRINTF(" min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t); INFO_PRINTF("Build U subroutine time consumption:\n"); INFO_PRINTF(" tid, kernel eval, randn gen, gemm, ID compress, misc, total\n"); for (int tid = 0; tid < n_thread_i; tid++) { double *timers = U_timers + 8 * tid; INFO_PRINTF( " %3d, %6.3lf, %6.3lf, %6.3lf, %6.3lf, %6.3lf, %6.3lf\n", tid, timers[U_BUILD_KRNL_TIMER_IDX], timers[U_BUILD_RANDN_TIMER_IDX], timers[U_BUILD_GEMM_TIMER_IDX], timers[U_BUILD_QR_TIMER_IDX], timers[U_BUILD_OTHER_TIMER_IDX], thread_buf[tid]->timer ); } } // End of "if (h2pack->print_timers == 1)" } // End of i loop // 3. 
Initialize other not touched U J & add statistic info for (int i = 0; i < h2pack->n_UJ; i++) { if (U[i] == NULL) { H2P_dense_mat_init(&U[i], 1, 1); U[i]->nrow = 0; U[i]->ncol = 0; U[i]->ld = 0; } else { mat_size[U_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_FWD_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_FWD_SIZE_IDX] += U[i]->nrow + U[i]->ncol; mat_size[MV_BWD_SIZE_IDX] += U[i]->nrow * U[i]->ncol; mat_size[MV_BWD_SIZE_IDX] += U[i]->nrow + U[i]->ncol; } if (J[i] == NULL) H2P_int_vec_init(&J[i], 1); if (J_coord[i] == NULL) { H2P_dense_mat_init(&J_coord[i], 1, 1); J_coord[i]->nrow = 0; J_coord[i]->ncol = 0; J_coord[i]->ld = 0; } //printf("Node %3d: %d skeleton points\n", i, J[i]->length); } free(U_timers); for (int i = 0; i < n_thread; i++) H2P_thread_buf_reset(thread_buf[i]); BLAS_SET_NUM_THREADS(n_thread); } // Generate H2 generator matrices metadata // Input parameter: // h2pack : H2Pack structure with H2 projection matrices // Output parameter: // h2pack : H2Pack structure with H2 generator matrices metadata void H2P_generate_B_metadata(H2Pack_p h2pack) { int BD_JIT = h2pack->BD_JIT; int krnl_dim = h2pack->krnl_dim; int n_node = h2pack->n_node; int n_thread = h2pack->n_thread; int krnl_bimv_flops = h2pack->krnl_bimv_flops; int is_RPY_Ewald = h2pack->is_RPY_Ewald; int *node_level = h2pack->node_level; int *pt_cluster = h2pack->pt_cluster; size_t *mat_size = h2pack->mat_size; double *JIT_flops = h2pack->JIT_flops; H2P_int_vec_p B_blk = h2pack->B_blk; int n_r_adm_pair, *r_adm_pairs; if (h2pack->is_HSS) { n_r_adm_pair = h2pack->HSS_n_r_adm_pair; r_adm_pairs = h2pack->HSS_r_adm_pairs; } else { n_r_adm_pair = h2pack->n_r_adm_pair; r_adm_pairs = h2pack->r_adm_pairs; } // 1. Allocate arrays for storing sizes and pair information of B matrices h2pack->n_B = n_r_adm_pair; h2pack->B_nrow = (int*) malloc(sizeof(int) * n_r_adm_pair); h2pack->B_ncol = (int*) malloc(sizeof(int) * n_r_adm_pair); h2pack->B_ptr = (size_t*) malloc(sizeof(size_t) * (n_r_adm_pair + 1)); int *B_nrow = h2pack->B_nrow; int *B_ncol = h2pack->B_ncol; size_t *B_ptr = h2pack->B_ptr; ASSERT_PRINTF( h2pack->B_nrow != NULL && h2pack->B_ncol != NULL && h2pack->B_ptr != NULL, "Failed to allocate %d B matrices infomation array\n", n_r_adm_pair ); int B_pair_cnt = 0; int *B_pair_i = (int*) malloc(sizeof(int) * n_r_adm_pair * 2); int *B_pair_j = (int*) malloc(sizeof(int) * n_r_adm_pair * 2); int *B_pair_v = (int*) malloc(sizeof(int) * n_r_adm_pair * 2); ASSERT_PRINTF( B_pair_i != NULL && B_pair_j != NULL && B_pair_v != NULL, "Failed to allocate working buffer for B matrices indexing\n" ); // 2. 
Generate size, pair, and statistic information of each B matrix B_ptr[0] = 0; size_t B_total_size = 0; H2P_int_vec_p *J = h2pack->J; h2pack->node_n_r_adm = (int*) malloc(sizeof(int) * n_node); ASSERT_PRINTF( h2pack->node_n_r_adm != NULL, "Failed to allocate array of size %d for counting node admissible pairs\n", n_node ); int *node_n_r_adm = h2pack->node_n_r_adm; memset(node_n_r_adm, 0, sizeof(int) * n_node); for (int i = 0; i < n_r_adm_pair; i++) { int node0 = r_adm_pairs[2 * i]; int node1 = r_adm_pairs[2 * i + 1]; int level0 = node_level[node0]; int level1 = node_level[node1]; node_n_r_adm[node0]++; if (is_RPY_Ewald == 0) node_n_r_adm[node1]++; int node0_npt = 0, node1_npt = 0; if (level0 == level1) { node0_npt = J[node0]->length; node1_npt = J[node1]->length; } if (level0 > level1) { int pt_s1 = pt_cluster[2 * node1]; int pt_e1 = pt_cluster[2 * node1 + 1]; node0_npt = J[node0]->length; node1_npt = pt_e1 - pt_s1 + 1; } if (level0 < level1) { int pt_s0 = pt_cluster[2 * node0]; int pt_e0 = pt_cluster[2 * node0 + 1]; node0_npt = pt_e0 - pt_s0 + 1; node1_npt = J[node1]->length; } B_nrow[i] = node0_npt * krnl_dim; B_ncol[i] = node1_npt * krnl_dim; size_t Bi_size = (size_t) B_nrow[i] * (size_t) B_ncol[i]; //Bi_size = (Bi_size + N_DTYPE_64B - 1) / N_DTYPE_64B * N_DTYPE_64B; B_total_size += Bi_size; B_ptr[i + 1] = Bi_size; B_pair_i[B_pair_cnt] = node0; B_pair_j[B_pair_cnt] = node1; B_pair_v[B_pair_cnt] = i + 1; B_pair_cnt++; if (is_RPY_Ewald == 0) { B_pair_i[B_pair_cnt] = node1; B_pair_j[B_pair_cnt] = node0; B_pair_v[B_pair_cnt] = -(i + 1); B_pair_cnt++; } // Add Statistic info mat_size[MV_MID_SIZE_IDX] += B_nrow[i] * B_ncol[i]; mat_size[MV_MID_SIZE_IDX] += (B_nrow[i] + B_ncol[i]); JIT_flops[JIT_B_FLOPS_IDX] += (double)(krnl_bimv_flops) * (double)(node0_npt * node1_npt); if (is_RPY_Ewald == 0) mat_size[MV_MID_SIZE_IDX] += (B_nrow[i] + B_ncol[i]); } // 3. Partition B matrices into multiple blocks s.t. each block has approximately // the same workload (total size of B matrices in a block) int BD_ntask_thread = (BD_JIT == 1) ? BD_NTASK_THREAD : 1; H2P_partition_workload(n_r_adm_pair, B_ptr + 1, B_total_size, n_thread * BD_ntask_thread, B_blk); for (int i = 1; i <= n_r_adm_pair; i++) B_ptr[i] += B_ptr[i - 1]; mat_size[B_SIZE_IDX] = B_total_size; // 4. Store pair-to-index relations in a CSR matrix for matvec, matmul, and SPDHSS construction h2pack->B_p2i_rowptr = (int*) malloc(sizeof(int) * (n_node + 1)); h2pack->B_p2i_colidx = (int*) malloc(sizeof(int) * n_r_adm_pair * 2); h2pack->B_p2i_val = (int*) malloc(sizeof(int) * n_r_adm_pair * 2); ASSERT_PRINTF(h2pack->B_p2i_rowptr != NULL, "Failed to allocate arrays for B matrices indexing\n"); ASSERT_PRINTF(h2pack->B_p2i_colidx != NULL, "Failed to allocate arrays for B matrices indexing\n"); ASSERT_PRINTF(h2pack->B_p2i_val != NULL, "Failed to allocate arrays for B matrices indexing\n"); H2P_int_COO_to_CSR( n_node, B_pair_cnt, B_pair_i, B_pair_j, B_pair_v, h2pack->B_p2i_rowptr, h2pack->B_p2i_colidx, h2pack->B_p2i_val ); free(B_pair_i); free(B_pair_j); free(B_pair_v); } // Build H2 generator matrices for AOT mode // Input parameter: // h2pack : H2Pack structure with H2 generator matrices metadata // Output parameter: // h2pack : H2Pack structure with H2 generator matrices void H2P_build_B_AOT(H2Pack_p h2pack) { int krnl_dim = h2pack->krnl_dim; int n_point = h2pack->n_point; int n_thread = h2pack->n_thread; int *node_level = h2pack->node_level; int *pt_cluster = h2pack->pt_cluster; int *r_adm_pairs = (h2pack->is_HSS) ? 
h2pack->HSS_r_adm_pairs : h2pack->r_adm_pairs; size_t *B_ptr = h2pack->B_ptr; DTYPE *coord = h2pack->coord; void *krnl_param = h2pack->krnl_param; kernel_eval_fptr krnl_eval = h2pack->krnl_eval; H2P_int_vec_p B_blk = h2pack->B_blk; H2P_dense_mat_p *J_coord = h2pack->J_coord; H2P_thread_buf_p *thread_buf = h2pack->tb; size_t B_total_size = h2pack->mat_size[B_SIZE_IDX]; h2pack->B_data = (DTYPE*) malloc_aligned(sizeof(DTYPE) * B_total_size, 64); ASSERT_PRINTF(h2pack->B_data != NULL, "Failed to allocate space for storing all %zu B matrices elements\n", B_total_size); DTYPE *B_data = h2pack->B_data; const int n_B_blk = B_blk->length - 1; #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); thread_buf[tid]->timer = -get_wtime_sec(); //#pragma omp for schedule(dynamic) nowait //for (int i_blk = 0; i_blk < n_B_blk; i_blk++) int i_blk = tid; // Use first-touch policy for better NUMA memory access performance { int B_blk_s = B_blk->data[i_blk]; int B_blk_e = B_blk->data[i_blk + 1]; if (i_blk >= n_B_blk) { B_blk_s = 0; B_blk_e = 0; } for (int i = B_blk_s; i < B_blk_e; i++) { int node0 = r_adm_pairs[2 * i]; int node1 = r_adm_pairs[2 * i + 1]; int level0 = node_level[node0]; int level1 = node_level[node1]; DTYPE *Bi = B_data + B_ptr[i]; // (1) Two nodes are of the same level, compress on both sides if (level0 == level1) { krnl_eval( J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol, J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol, krnl_param, Bi, J_coord[node1]->ncol * krnl_dim ); } // (2) node1 is a leaf node and its level is higher than node0's level, // only compress on node0's side if (level0 > level1) { int pt_s1 = pt_cluster[2 * node1]; int pt_e1 = pt_cluster[2 * node1 + 1]; int node1_npt = pt_e1 - pt_s1 + 1; krnl_eval( J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol, coord + pt_s1, n_point, node1_npt, krnl_param, Bi, node1_npt * krnl_dim ); } // (3) node0 is a leaf node and its level is higher than node1's level, // only compress on node1's side if (level0 < level1) { int pt_s0 = pt_cluster[2 * node0]; int pt_e0 = pt_cluster[2 * node0 + 1]; int node0_npt = pt_e0 - pt_s0 + 1; krnl_eval( coord + pt_s0, n_point, node0_npt, J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol, krnl_param, Bi, J_coord[node1]->ncol * krnl_dim ); } } // End of i loop } // End of i_blk loop thread_buf[tid]->timer += get_wtime_sec(); } // End of "pragma omp parallel" if (h2pack->print_timers == 1) { double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0; for (int i = 0; i < n_thread; i++) { double thread_i_timer = thread_buf[i]->timer; avg_t += thread_i_timer; max_t = MAX(max_t, thread_i_timer); min_t = MIN(min_t, thread_i_timer); } avg_t /= (double) n_thread; INFO_PRINTF("Build B: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t); } } // Generate H2 dense blocks metadata // Input parameter: // h2pack : H2Pack structure with H2 projection matrices // Output parameter: // h2pack : H2Pack structure with H2 dense blocks metadata void H2P_generate_D_metadata(H2Pack_p h2pack) { int BD_JIT = h2pack->BD_JIT; int krnl_dim = h2pack->krnl_dim; int n_node = h2pack->n_node; int n_thread = h2pack->n_thread; int n_leaf_node = h2pack->n_leaf_node; int krnl_bimv_flops = h2pack->krnl_bimv_flops; int is_RPY_Ewald = h2pack->is_RPY_Ewald; int *leaf_nodes = h2pack->height_nodes; int *pt_cluster = h2pack->pt_cluster; size_t *mat_size = h2pack->mat_size; double *JIT_flops = h2pack->JIT_flops; H2P_int_vec_p D_blk0 = h2pack->D_blk0; 
H2P_int_vec_p D_blk1 = h2pack->D_blk1; int n_r_inadm_pair, *r_inadm_pairs; if (h2pack->is_HSS) { n_r_inadm_pair = h2pack->HSS_n_r_inadm_pair; r_inadm_pairs = h2pack->HSS_r_inadm_pairs; } else { n_r_inadm_pair = h2pack->n_r_inadm_pair; r_inadm_pairs = h2pack->r_inadm_pairs; } // 1. Allocate arrays for storing sizes and pair information of D blocks h2pack->n_D = n_leaf_node + n_r_inadm_pair; h2pack->D_nrow = (int*) malloc(sizeof(int) * h2pack->n_D); h2pack->D_ncol = (int*) malloc(sizeof(int) * h2pack->n_D); h2pack->D_ptr = (size_t*) malloc(sizeof(size_t) * (h2pack->n_D + 1)); int *D_nrow = h2pack->D_nrow; int *D_ncol = h2pack->D_ncol; size_t *D_ptr = h2pack->D_ptr; ASSERT_PRINTF( h2pack->D_nrow != NULL && h2pack->D_ncol != NULL && h2pack->D_ptr != NULL, "Failed to allocate %d D matrices infomation array\n", h2pack->n_D ); int D_pair_cnt = 0; int n_Dij_pair = n_leaf_node + 2 * n_r_inadm_pair; int *D_pair_i = (int*) malloc(sizeof(int) * n_Dij_pair); int *D_pair_j = (int*) malloc(sizeof(int) * n_Dij_pair); int *D_pair_v = (int*) malloc(sizeof(int) * n_Dij_pair); ASSERT_PRINTF( D_pair_i != NULL && D_pair_j != NULL && D_pair_v != NULL, "Failed to allocate working buffer for D matrices indexing\n" ); // 2. Generate size, pair, and statistic information of each D matrix D_ptr[0] = 0; size_t D0_total_size = 0; for (int i = 0; i < n_leaf_node; i++) { int node = leaf_nodes[i]; int pt_s = pt_cluster[2 * node]; int pt_e = pt_cluster[2 * node + 1]; int node_npt = pt_e - pt_s + 1; D_nrow[i] = node_npt * krnl_dim; D_ncol[i] = node_npt * krnl_dim; size_t Di_size = (size_t) D_nrow[i] * (size_t) D_ncol[i]; //Di_size = (Di_size + N_DTYPE_64B - 1) / N_DTYPE_64B * N_DTYPE_64B; D_ptr[i + 1] = Di_size; D0_total_size += Di_size; D_pair_i[D_pair_cnt] = node; D_pair_j[D_pair_cnt] = node; D_pair_v[D_pair_cnt] = i + 1; D_pair_cnt++; // Add statistic info mat_size[MV_DEN_SIZE_IDX] += D_nrow[i] * D_ncol[i]; mat_size[MV_DEN_SIZE_IDX] += D_nrow[i] + D_ncol[i]; JIT_flops[JIT_D_FLOPS_IDX] += (double)(krnl_bimv_flops) * (double)(node_npt * node_npt); } size_t D1_total_size = 0; for (int i = 0; i < n_r_inadm_pair; i++) { int ii = i + n_leaf_node; int node0 = r_inadm_pairs[2 * i]; int node1 = r_inadm_pairs[2 * i + 1]; int pt_s0 = pt_cluster[2 * node0]; int pt_s1 = pt_cluster[2 * node1]; int pt_e0 = pt_cluster[2 * node0 + 1]; int pt_e1 = pt_cluster[2 * node1 + 1]; int node0_npt = pt_e0 - pt_s0 + 1; int node1_npt = pt_e1 - pt_s1 + 1; D_nrow[ii] = node0_npt * krnl_dim; D_ncol[ii] = node1_npt * krnl_dim; size_t Di_size = (size_t) D_nrow[ii] * (size_t) D_ncol[ii]; //Di_size = (Di_size + N_DTYPE_64B - 1) / N_DTYPE_64B * N_DTYPE_64B; D_ptr[ii + 1] = Di_size; D1_total_size += Di_size; D_pair_i[D_pair_cnt] = node0; D_pair_j[D_pair_cnt] = node1; D_pair_v[D_pair_cnt] = ii + 1; D_pair_cnt++; if (is_RPY_Ewald == 0) { D_pair_i[D_pair_cnt] = node1; D_pair_j[D_pair_cnt] = node0; D_pair_v[D_pair_cnt] = -(ii + 1); D_pair_cnt++; } // Add statistic info mat_size[MV_DEN_SIZE_IDX] += D_nrow[ii] * D_ncol[ii]; mat_size[MV_DEN_SIZE_IDX] += (D_nrow[ii] + D_ncol[ii]); JIT_flops[JIT_D_FLOPS_IDX] += (double)(krnl_bimv_flops) * (double)(node0_npt * node1_npt); if (is_RPY_Ewald == 0) mat_size[MV_DEN_SIZE_IDX] += (D_nrow[ii] + D_ncol[ii]); } // 3. Partition D blocks into multiple blocks s.t. each block has approximately // the same workload (total size of D blocks in a block) int BD_ntask_thread = (BD_JIT == 1) ? 
BD_NTASK_THREAD : 1; H2P_partition_workload(n_leaf_node, D_ptr + 1, D0_total_size, n_thread * BD_ntask_thread, D_blk0); H2P_partition_workload(n_r_inadm_pair, D_ptr + n_leaf_node + 1, D1_total_size, n_thread * BD_ntask_thread, D_blk1); for (int i = 1; i <= n_leaf_node + n_r_inadm_pair; i++) D_ptr[i] += D_ptr[i - 1]; mat_size[D_SIZE_IDX] = D0_total_size + D1_total_size; // 4. Store pair-to-index relations in a CSR matrix for matvec, matmul, and SPDHSS construction h2pack->D_p2i_rowptr = (int*) malloc(sizeof(int) * (n_node + 1)); h2pack->D_p2i_colidx = (int*) malloc(sizeof(int) * n_Dij_pair); h2pack->D_p2i_val = (int*) malloc(sizeof(int) * n_Dij_pair); ASSERT_PRINTF(h2pack->D_p2i_rowptr != NULL, "Failed to allocate arrays for D matrices indexing\n"); ASSERT_PRINTF(h2pack->D_p2i_colidx != NULL, "Failed to allocate arrays for D matrices indexing\n"); ASSERT_PRINTF(h2pack->D_p2i_val != NULL, "Failed to allocate arrays for D matrices indexing\n"); H2P_int_COO_to_CSR( n_node, D_pair_cnt, D_pair_i, D_pair_j, D_pair_v, h2pack->D_p2i_rowptr, h2pack->D_p2i_colidx, h2pack->D_p2i_val ); free(D_pair_i); free(D_pair_j); free(D_pair_v); } // Build H2 dense blocks for AOT mode // Input parameter: // h2pack : H2Pack structure with H2 dense blocks metadata // Output parameter: // h2pack : H2Pack structure with H2 dense blocks void H2P_build_D_AOT(H2Pack_p h2pack) { int krnl_dim = h2pack->krnl_dim; int n_thread = h2pack->n_thread; int n_point = h2pack->n_point; int n_leaf_node = h2pack->n_leaf_node; int *leaf_nodes = h2pack->height_nodes; int *pt_cluster = h2pack->pt_cluster; int *r_inadm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_inadm_pairs : h2pack->r_inadm_pairs; size_t *D_ptr = h2pack->D_ptr; DTYPE *coord = h2pack->coord; void *krnl_param = h2pack->krnl_param; kernel_eval_fptr krnl_eval = h2pack->krnl_eval; H2P_int_vec_p D_blk0 = h2pack->D_blk0; H2P_int_vec_p D_blk1 = h2pack->D_blk1; H2P_thread_buf_p *thread_buf = h2pack->tb; size_t D_total_size = h2pack->mat_size[D_SIZE_IDX]; h2pack->D_data = (DTYPE*) malloc_aligned(sizeof(DTYPE) * D_total_size, 64); ASSERT_PRINTF( h2pack->D_data != NULL, "Failed to allocate space for storing all %zu D matrices elements\n", D_total_size ); DTYPE *D_data = h2pack->D_data; const int n_D0_blk = D_blk0->length - 1; const int n_D1_blk = D_blk1->length - 1; #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); thread_buf[tid]->timer = -get_wtime_sec(); // 3. Generate diagonal blocks (leaf node self interaction) //#pragma omp for schedule(dynamic) nowait //for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++) int i_blk0 = tid; // Use first-touch policy for better NUMA memory access performance { int D_blk0_s = D_blk0->data[i_blk0]; int D_blk0_e = D_blk0->data[i_blk0 + 1]; if (i_blk0 >= n_D0_blk) { D_blk0_s = 0; D_blk0_e = 0; } for (int i = D_blk0_s; i < D_blk0_e; i++) { int node = leaf_nodes[i]; int pt_s = pt_cluster[2 * node]; int pt_e = pt_cluster[2 * node + 1]; int node_npt = pt_e - pt_s + 1; DTYPE *Di = D_data + D_ptr[i]; krnl_eval( coord + pt_s, n_point, node_npt, coord + pt_s, n_point, node_npt, krnl_param, Di, node_npt * krnl_dim ); } } // End of i_blk0 loop // 4. 
Generate off-diagonal blocks from inadmissible pairs //#pragma omp for schedule(dynamic) nowait //for (int i_blk1 = 0; i_blk1 < n_D1_blk; i_blk1++) int i_blk1 = tid; // Use first-touch policy for better NUMA memory access performance { int D_blk1_s = D_blk1->data[i_blk1]; int D_blk1_e = D_blk1->data[i_blk1 + 1]; if (i_blk1 >= n_D1_blk) { D_blk1_s = 0; D_blk1_e = 0; } for (int i = D_blk1_s; i < D_blk1_e; i++) { int node0 = r_inadm_pairs[2 * i]; int node1 = r_inadm_pairs[2 * i + 1]; int pt_s0 = pt_cluster[2 * node0]; int pt_s1 = pt_cluster[2 * node1]; int pt_e0 = pt_cluster[2 * node0 + 1]; int pt_e1 = pt_cluster[2 * node1 + 1]; int node0_npt = pt_e0 - pt_s0 + 1; int node1_npt = pt_e1 - pt_s1 + 1; DTYPE *Di = D_data + D_ptr[i + n_leaf_node]; krnl_eval( coord + pt_s0, n_point, node0_npt, coord + pt_s1, n_point, node1_npt, krnl_param, Di, node1_npt * krnl_dim ); } } // End of i_blk1 loop thread_buf[tid]->timer += get_wtime_sec(); } // End of "pragma omp parallel" if (h2pack->print_timers == 1) { double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0; for (int i = 0; i < n_thread; i++) { double thread_i_timer = thread_buf[i]->timer; avg_t += thread_i_timer; max_t = MAX(max_t, thread_i_timer); min_t = MIN(min_t, thread_i_timer); } avg_t /= (double) n_thread; INFO_PRINTF("Build D: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t); } } // Build H2 representation with a kernel function void H2P_build( H2Pack_p h2pack, H2P_dense_mat_p *pp, const int BD_JIT, void *krnl_param, kernel_eval_fptr krnl_eval, kernel_bimv_fptr krnl_bimv, const int krnl_bimv_flops ) { double st, et; double *timers = h2pack->timers; if (pp == NULL) { ERROR_PRINTF("You need to provide a set of proxy points.\n"); return; } if (krnl_eval == NULL) { ERROR_PRINTF("You need to provide a valid krnl_eval().\n"); return; } h2pack->pp = pp; h2pack->BD_JIT = BD_JIT; h2pack->krnl_param = krnl_param; h2pack->krnl_eval = krnl_eval; h2pack->krnl_bimv = krnl_bimv; h2pack->krnl_bimv_flops = krnl_bimv_flops; if (BD_JIT == 1 && krnl_bimv == NULL) WARNING_PRINTF("krnl_eval() will be used in BD_JIT matvec. For better performance, consider using a krnl_bimv().\n"); // 1. Build projection matrices and skeleton row sets st = get_wtime_sec(); if (h2pack->is_HSS) H2P_build_HSS_UJ_hybrid(h2pack); else H2P_build_H2_UJ_proxy(h2pack); et = get_wtime_sec(); timers[U_BUILD_TIMER_IDX] = et - st; // 2. Build generator matrices st = get_wtime_sec(); H2P_generate_B_metadata(h2pack); if (BD_JIT == 0) H2P_build_B_AOT(h2pack); et = get_wtime_sec(); timers[B_BUILD_TIMER_IDX] = et - st; // 3. Build dense blocks st = get_wtime_sec(); H2P_generate_D_metadata(h2pack); if (BD_JIT == 0) H2P_build_D_AOT(h2pack); et = get_wtime_sec(); timers[D_BUILD_TIMER_IDX] = et - st; // 4. Set up forward and backward permutation indices int n_point = h2pack->n_point; int krnl_dim = h2pack->krnl_dim; int *coord_idx = h2pack->coord_idx; int *fwd_pmt_idx = (int*) malloc(sizeof(int) * n_point * krnl_dim); int *bwd_pmt_idx = (int*) malloc(sizeof(int) * n_point * krnl_dim); for (int i = 0; i < n_point; i++) { for (int j = 0; j < krnl_dim; j++) { fwd_pmt_idx[i * krnl_dim + j] = coord_idx[i] * krnl_dim + j; bwd_pmt_idx[coord_idx[i] * krnl_dim + j] = i * krnl_dim + j; } } h2pack->fwd_pmt_idx = fwd_pmt_idx; h2pack->bwd_pmt_idx = bwd_pmt_idx; }
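// Note on the U-build above: when a sampled block is very wide (the
// A_blk_ncol > 2 * A_blk_nrow branch), H2P_build_HSS_UJ_hybrid first shrinks
// it with a Gaussian random sketch, A (m x n) -> A*G with G (n x m) filled
// with i.i.d. N(0,1) samples, and only then runs ID compression. Any linear
// relation among the rows of A carries over to A*G, and with high probability
// no new relation appears, so the row skeleton is preserved while the
// pivoted-QR cost drops roughly from O(m*n*r) to O(m*m*r). A minimal
// standalone sketch of that step follows; the function and variable names
// are ours, not H2Pack's (H2Pack uses H2P_gen_normal_distribution and
// CBLAS_GEMM internally).
#include <stdlib.h>
#include <math.h>
#include <cblas.h>   // any CBLAS implementation
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static void gaussian_sketch(const double *A, int m, int n, double *AG)
{
    double *G = (double*) malloc(sizeof(double) * (size_t) n * (size_t) m);
    for (int i = 0; i < n * m; i += 2)
    {
        // Box-Muller: two N(0,1) samples from two uniforms in (0, 1)
        double u1 = (rand() + 1.0) / ((double) RAND_MAX + 2.0);
        double u2 = (rand() + 1.0) / ((double) RAND_MAX + 2.0);
        double r  = sqrt(-2.0 * log(u1));
        G[i] = r * cos(2.0 * M_PI * u2);
        if (i + 1 < n * m) G[i + 1] = r * sin(2.0 * M_PI * u2);
    }
    // AG (m x m) = A (m x n) * G (n x m), all row-major
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasNoTrans, m, m, n,
        1.0, A, n, G, m, 0.0, AG, m
    );
    free(G);
}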
DRB031-truedepfirstdimension-orig-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
There is a loop-carried true dependence within the outer level loop.
Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15
*/

#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  omprace_init();
  int i,j;
  int n=1000, m=1000;
  double b[1000][1000];

  for (i=0; i<n; i++)
    for (j=0; j<m; j++)
      b[i][j] = 0.5;

#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=1;j<m;j++)
      b[i][j]=b[i-1][j-1];

  printf("b[500][500]=%f\n", b[500][500]);

  omprace_fini();
  return 0;
}
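// Note: the "parallel for" above is the documented race. The dependence
// b[i][j] <- b[i-1][j-1] only links cells that lie on the same diagonal
// (j - i constant), so one race-free restructuring parallelizes over
// diagonals and walks each chain sequentially. This rewrite is our
// illustration, not part of the DataRaceBench kernel:
void diagonal_wavefront(int n, int m, double b[n][m])
{
#pragma omp parallel for
  for (int d = 2 - n; d <= m - 2; d++)     // d = j - i picks one diagonal
  {
    int i0 = (d >= 0) ? 1 : 1 - d;         // first cell of this chain
    for (int i = i0; i < n && i + d < m; i++)
      b[i][i + d] = b[i - 1][i + d - 1];   // sequential along the chain
  }
}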
GB_binop__cmplx_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__cmplx_fp32) // A.*B function (eWiseMult): GB (_AemultB_01__cmplx_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__cmplx_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp32) // C=scalar+B GB (_bind1st__cmplx_fp32) // C=scalar+B' GB (_bind1st_tran__cmplx_fp32) // C=A+scalar GB (_bind2nd__cmplx_fp32) // C=A'+scalar GB (_bind2nd_tran__cmplx_fp32) // C type: GxB_FC32_t // A type: float // B,b type: float // BinaryOp: cij = GxB_CMPLXF (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GxB_CMPLXF (GBX (Ax, pA, A_iso), 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GxB_CMPLXF (GBX (Bx, pB, B_iso), 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GxB_CMPLXF (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__cmplx_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__cmplx_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__cmplx_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__cmplx_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__cmplx_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__cmplx_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__cmplx_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__cmplx_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__cmplx_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = GxB_CMPLXF (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__cmplx_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = GxB_CMPLXF (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = GxB_CMPLXF (x, aij) ; \ } GrB_Info GB (_bind1st_tran__cmplx_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = GxB_CMPLXF (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__cmplx_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
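// Note: stripped of the GraphBLAS plumbing (iso values, bitmap masks,
// GB_DISABLE guards), every kernel in this file computes
// z = GxB_CMPLXF (x, y), i.e. a single-precision complex number with real
// part x and imaginary part y. A minimal standalone equivalent of
// _bind1st__cmplx_fp32, assuming a C11 <complex.h> (the names below are
// ours, not GraphBLAS APIs):
#include <complex.h>
static void bind1st_cmplxf_sketch(float complex *Cx, float x,
                                  const float *Bx, long bnz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (long p = 0; p < bnz; p++)
    {
        // real part from the bound scalar, imaginary part from B
        Cx[p] = CMPLXF(x, Bx[p]);
    }
}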
gdal_sebs_eta.c
#include<stdio.h> #include<omp.h> #include<math.h> #include "gdal.h" #include "cpl_conv.h" #include "sebs_eta.h" void usage() { printf( "-----------------------------------------\n"); printf( "--Modis Processing chain--OpenMP code----\n"); printf( "-----------------------------------------\n"); printf( "./sebs_eta inLst inDem inE031 inE032 inNdvi inAlbedo inSunza\n"); printf( "\toutSebs_evapfr outSebs_eta\n"); printf( "\tz_pbl t_s p_s u_s hr_s alt_ms doy\n"); printf( "-----------------------------------------\n"); return; } int main( int argc, char *argv[] ) { if( argc < 16 ) { usage(); return 1; } //Loading the input files names //----------------------------- char *inB1 = argv[1]; //LST char *inB2 = argv[2]; //DEM char *inB3 = argv[3]; //e0-1-b31 char *inB4 = argv[4]; //e0-1-b32 char *inB5 = argv[5]; //ndvi char *inB6 = argv[6]; //albedo char *inB7 = argv[7]; //sunza char *sebs_evapfrF = argv[8]; char *sebs_etaF = argv[9]; double z_pbl = atof(argv[10]); // double t_s = atof(argv[11]); // double p_s = atof(argv[12]); // double u_s = atof(argv[13]); // double hr_s = atof(argv[14]); // double alt_ms = atof(argv[15]); // double doy = atof(argv[16]); // printf("inB5 = %s\n", inB5); //Loading the input files //----------------------- GDALDatasetH hD1;//LST GDALDatasetH hD2;//DEM GDALDatasetH hD3;//e031 GDALDatasetH hD4;//e032 GDALDatasetH hD5;//ndvi GDALDatasetH hD6;//albedo GDALDatasetH hD7;//sunza GDALAllRegister(); hD1 = GDALOpen(inB1,GA_ReadOnly);//LST hD2 = GDALOpen(inB2,GA_ReadOnly);//DEM hD3 = GDALOpen(inB3,GA_ReadOnly);//e031 hD4 = GDALOpen(inB4,GA_ReadOnly);//e032 hD5 = GDALOpen(inB5,GA_ReadOnly);//ndvi hD6 = GDALOpen(inB6,GA_ReadOnly);//albedo hD7 = GDALOpen(inB7,GA_ReadOnly);//sunza if(hD1==NULL||hD2==NULL||hD3==NULL||hD4==NULL ||hD5==NULL||hD6==NULL||hD7==NULL){ printf("One or more input files "); printf("could not be loaded\n"); exit(1); } double geomx[6]={0.0}; if(GDALGetGeoTransform(hD1,geomx)==CE_None){ /* Do Nothing */ // printf( "Origin (ULx,ULy) = (%.6f,%.6f)\n", geomx[0], geomx[3] ); // printf( "Pixel Size = (%.6f,%.6f)\n", geomx[1], geomx[5] ); // printf( "Rot0 = (%.6f,%.6f)\n", geomx[2], geomx[4] ); } else { printf("ERROR: Projection acquisition problem from Band1\n"); exit(1); } //Loading the file infos //---------------------- GDALDriverH hDr1; GDALDriverH hDr2; GDALDriverH hDr3; GDALDriverH hDr4; GDALDriverH hDr5; GDALDriverH hDr6; GDALDriverH hDr7; hDr1 = GDALGetDatasetDriver(hD1); hDr2 = GDALGetDatasetDriver(hD2); hDr3 = GDALGetDatasetDriver(hD3); hDr4 = GDALGetDatasetDriver(hD4); hDr5 = GDALGetDatasetDriver(hD5); hDr6 = GDALGetDatasetDriver(hD6); hDr7 = GDALGetDatasetDriver(hD7); //Creating output file //-------------------- char **options = NULL; options = CSLSetNameValue( options, "TILED", "YES" ); options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" ); options = CSLSetNameValue( options, "PREDICTOR", "2" ); //Evapfr out GDALDatasetH hDOut0; hDOut0 = GDALCreateCopy( hDr2, sebs_evapfrF,hD2,FALSE,options,NULL,NULL); GDALRasterBandH hBOut0; hBOut0 = GDALGetRasterBand(hDOut0,1); //ETa out GDALDatasetH hDOut; hDOut = GDALCreateCopy( hDr2, sebs_etaF,hD2,FALSE,options,NULL,NULL); GDALRasterBandH hBOut; hBOut = GDALGetRasterBand(hDOut,1); //Loading the file bands //---------------------- GDALRasterBandH hB1;//LST GDALRasterBandH hB2;//DEM GDALRasterBandH hB3;//e031 GDALRasterBandH hB4;//e032 GDALRasterBandH hB5;//ndvi GDALRasterBandH hB6;//albedo GDALRasterBandH hB7;//sunza hB1 = GDALGetRasterBand(hD1,1);//LST hB2 = GDALGetRasterBand(hD2,1);//DEM hB3 = 
GDALGetRasterBand(hD3,1);//e031 hB4 = GDALGetRasterBand(hD4,1);//e032 hB5 = GDALGetRasterBand(hD5,1);//ndvi hB6 = GDALGetRasterBand(hD6,1);//albedo hB7 = GDALGetRasterBand(hD7,1);//sunza //Loading the data rowxrow //------------------------ int nX = GDALGetRasterBandXSize(hB1); int nY = GDALGetRasterBandYSize(hB1); int N=nX*nY; float *mat1 = (float *) CPLMalloc(sizeof(float)*N); float *mat2 = (float *) CPLMalloc(sizeof(float)*N); float *mat3 = (float *) CPLMalloc(sizeof(float)*N); float *mat4 = (float *) CPLMalloc(sizeof(float)*N); float *mat5 = (float *) CPLMalloc(sizeof(float)*N); float *mat6 = (float *) CPLMalloc(sizeof(float)*N); float *mat7 = (float *) CPLMalloc(sizeof(float)*N); float *matOut0 = (float *) CPLMalloc(sizeof(float)*N); float *matOut = (float *) CPLMalloc(sizeof(float)*N); float *latitude = (float *) CPLMalloc(sizeof(float)*N); float *ndvi_median = (float *) CPLMalloc(sizeof(float)*N); float sebsevapfr, sebseta, kin, kin24, e0; int row=nY, col=nX, rowcol, n=0; double ndvi_min=10000.0; double ndvi_max=-10000.0; double ndvimedian=0.0; int i,j,temp; double esat_avg, e_sat_sum=0.0, e_act; GDALRasterIO(hB1,GF_Read,0,0,nX,nY,mat1,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB2,GF_Read,0,0,nX,nY,mat2,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB3,GF_Read,0,0,nX,nY,mat3,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB4,GF_Read,0,0,nX,nY,mat4,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB5,GF_Read,0,0,nX,nY,mat5,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB6,GF_Read,0,0,nX,nY,mat6,nX,nY,GDT_Float32,0,0); GDALRasterIO(hB7,GF_Read,0,0,nX,nY,mat7,nX,nY,GDT_Float32,0,0); //One more time NDVI for sorting purposes of median GDALRasterIO(hB5,GF_Read,0,0,nX,nY,ndvi_median,nX,nY,GDT_Float32,0,0); printf("Latitude\t"); //Latitude map for(row=0;row<nY;row++){ #pragma omp parallel for default(none) \ private(col) shared( row, nX, nY, geomx, latitude) for(col=0;col<nX;col++){ latitude[row*nY+col] = geomx[3]+geomx[4]*col+geomx[5]*row; // printf("%f ",latitude[row*nY+col]); } #pragma omp barrier } printf("\n"); #pragma omp parallel for default(none) \ private(i)\ shared(N, mat1, mat5, ndvi_median) for(i=0;i<N;i++){ if(mat1[i]==-28768||mat1[i]*0.02<=250.0||mat1[i]*0.02>365.0 ||mat5[i]<=-10000.0||mat5[i]>=10000.0||mat5[i]==0.0){ ndvi_median[i]=99999.99; } else if(mat5[i]<0.0){ ndvi_median[i]=99999.99; } } #pragma omp barrier // printf("\nndvi min max\t"); #pragma omp parallel for default(none) \ private(rowcol)\ shared(N, ndvi_min, ndvi_max, ndvi_median ) for(rowcol=0;rowcol<N;rowcol++){ if(ndvi_median[rowcol]<=99999.99){ if(ndvi_median[rowcol]*0.0001<ndvi_min) ndvi_min=ndvi_median[rowcol]*0.0001; if(ndvi_median[rowcol]*0.0001>ndvi_max) ndvi_max=ndvi_median[rowcol]*0.0001; } } #pragma omp barrier printf("ndvimin = %f [-]\tndvimax = %f [-]\n",ndvi_min, ndvi_max); //e_sat_sum for e_sat_avg value // printf("e_sat_avg\t"); n=0; #pragma omp parallel for default(none) \ private(rowcol)\ shared(N, n, e_sat_sum, mat1, mat2, ndvi_median ) for(rowcol=0;rowcol<N;rowcol++){ // if(mat1[rowcol]!=28768||mat1[rowcol]*0.02>=250.0||mat1[rowcol]*0.02<365.0 // ||mat5[rowcol]>=-10000.0||mat5[rowcol]<=10000.0||mat5[rowcol]!=0.0 // ||mat2[rowcol]<0.0||mat2[rowcol]>8888.0){ if(ndvi_median[rowcol]<99999.99){ #pragma omp atomic n+=1; #pragma omp atomic e_sat_sum+=e_sat(mat1[rowcol]*0.02,mat2[rowcol]); } } #pragma omp barrier esat_avg=e_sat_sum/n; e_act=eact(esat_avg,hr_s); // printf("= %f [Pa]\te_act = %f [Pa]\tn = %i\te_sat_sum = %f\n",esat_avg,e_act,n,e_sat_sum); printf("ndvimedian\t"); #pragma omp parallel for default(none) \ private(i, j, temp)\ 
shared(N, ndvi_median) for(i=0;i<N;i++){ for(j=i+1;j<N-1;j++){ if(ndvi_median[i]>ndvi_median[j]){ temp=ndvi_median[j]; ndvi_median[j]=ndvi_median[i]; ndvi_median[i]=temp; } } } #pragma omp barrier if(n%2==0) ndvimedian=((ndvi_median[n/2]+ndvi_median[(n/2)-1])/2)*0.0001; else ndvimedian=ndvi_median[n/2]*0.0001; printf("= %i <> %f [-]\t",n,ndvimedian); n=0; //Finally Process SEBS printf("sebs\n"); #pragma omp parallel for default(none) \ private(rowcol, sebseta, sebsevapfr, kin, kin24, e0)\ shared(N, e_act, doy, p_s, t_s, z_pbl, u_s, \ ndvimedian, ndvi_min, ndvi_max, alt_ms, hr_s,\ mat1, mat2, mat3, mat4, mat5, mat6, mat7, matOut0, matOut, latitude ) for(rowcol=0;rowcol<N;rowcol++){ if(mat1[rowcol]==-28768||mat1[rowcol]*0.02<=250.0||mat1[rowcol]*0.02>365.0 ||mat5[rowcol]>=10000.0||mat5[rowcol]<=0.0) matOut[rowcol] = -28768; else { e0 = 0.5*((mat3[rowcol]*0.002+0.49)+(mat4[rowcol]*0.002+0.49)); kin = k_in(mat2[rowcol],doy,mat7[rowcol]*0.01); kin24 = k_in24(latitude[rowcol], doy, mat2[rowcol]); sebsevapfr = sebs_evapfr(z_pbl,t_s,p_s,u_s,hr_s,alt_ms,e_act,kin,mat6[rowcol]*0.001,mat5[rowcol]*0.0001,ndvi_min,ndvi_max,ndvimedian,e0,mat1[rowcol]*0.02,mat2[rowcol]); // matOut0[rowcol] = mat6[rowcol]*0.001; matOut0[rowcol] = sebsevapfr; sebseta = sebs_eta(doy,mat5[rowcol]*0.0001,ndvi_max,ndvi_min,ndvimedian,u_s,z_pbl,t_s,p_s,alt_ms,e_act,latitude[rowcol],kin24,mat6[rowcol]*0.001,mat2[rowcol],mat1[rowcol]*0.02,e0,sebsevapfr); matOut[rowcol] = sebseta; // matOut[rowcol] = mat5[rowcol]*0.0001; } } #pragma omp barrier printf("write to file\n"); GDALRasterIO(hBOut0,GF_Write,0,0,nX,nY,matOut0,nX,nY,GDT_Float32,0,0); // GDALRasterIO(hBOut0,GF_Write,0,0,nX,nY,latitude,nX,nY,GDT_Float32,0,0); GDALClose(hDOut0); GDALRasterIO(hBOut,GF_Write,0,0,nX,nY,matOut,nX,nY,GDT_Float32,0,0); // GDALRasterIO(hBOut,GF_Write,0,0,nX,nY,ndvi_median,nX,nY,GDT_Float32,0,0); GDALClose(hDOut); //free memory close unused files if(mat1 != NULL) free(mat1); if(mat2 != NULL) free(mat2); if(mat3 != NULL) free(mat3); if(mat4 != NULL) free(mat4); if(mat5 != NULL) free(mat5); if(mat6 != NULL) free(mat6); if(matOut0 != NULL) free(matOut0); if(matOut != NULL) free(matOut); if(latitude != NULL) free(latitude); if(ndvi_median != NULL) free(ndvi_median); GDALClose(hD1); GDALClose(hD2); GDALClose(hD3); GDALClose(hD4); GDALClose(hD5); GDALClose(hD6); return(EXIT_SUCCESS); }
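// Notes on the kernels above:
// - ndvi_min and ndvi_max are updated inside "omp parallel for" regions
//   where they are shared with no reduction clause or atomics, so those
//   updates race. The parallel bubble sort likewise mutates the shared
//   ndvi_median array from concurrent iterations, and its swap temporary
//   is declared int, truncating the float values it swaps.
// - The standalone "#pragma omp barrier" after each loop is redundant: a
//   parallel-for region already ends with an implicit barrier.
// - latitude is written at row*nY+col but read back at the row-major index
//   row*nX+col, which only agrees when the raster is square (nX == nY).
// - hD7 is never closed and mat7 is never freed.
// Since OpenMP 3.1 the min/max pass can be written race-free with reduction
// clauses; a minimal sketch of the pattern (names are ours):
static void minmax_sketch(const float *v, int N, double *vmin, double *vmax)
{
    double lo = 1e30, hi = -1e30;
    #pragma omp parallel for reduction(min:lo) reduction(max:hi)
    for (int i = 0; i < N; i++)
    {
        if (v[i] < lo) lo = v[i];
        if (v[i] > hi) hi = v[i];
    }
    *vmin = lo;
    *vmax = hi;
}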
lenet.c
#include "lenet.h" #include <memory.h> #include <time.h> #include <stdlib.h> #include <math.h> #define GETLENGTH(array) (sizeof(array)/sizeof(*(array))) #define GETCOUNT(array) (sizeof(array)/sizeof(double)) #define FOREACH(i,count) for (int i = 0; i < count; ++i) #define CONVOLUTE_VALID(input,output,weight) \ { \ FOREACH(o0,GETLENGTH(output)) \ FOREACH(o1,GETLENGTH(*(output))) \ FOREACH(w0,GETLENGTH(weight)) \ FOREACH(w1,GETLENGTH(*(weight))) \ (output)[o0][o1] += (input)[o0 + w0][o1 + w1] * (weight)[w0][w1]; \ } #define CONVOLUTE_FULL(input,output,weight) \ { \ FOREACH(i0,GETLENGTH(input)) \ FOREACH(i1,GETLENGTH(*(input))) \ FOREACH(w0,GETLENGTH(weight)) \ FOREACH(w1,GETLENGTH(*(weight))) \ (output)[i0 + w0][i1 + w1] += (input)[i0][i1] * (weight)[w0][w1]; \ } #define CONVOLUTION_FORWARD(input,output,weight,bias,action) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_VALID(input[x], output[y], weight[x][y]); \ FOREACH(j, GETLENGTH(output)) \ FOREACH(i, GETCOUNT(output[j])) \ ((double *)output[j])[i] = action(((double *)output[j])[i] + bias[j]); \ } #define CONVOLUTION_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad)\ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_FULL(outerror[y], inerror[x], weight[x][y]); \ FOREACH(i, GETCOUNT(inerror)) \ ((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \ FOREACH(j, GETLENGTH(outerror)) \ FOREACH(i, GETCOUNT(outerror[j])) \ bd[j] += ((double *)outerror[j])[i]; \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_VALID(input[x], wd[x][y], outerror[y]); \ } #define SUBSAMP_MAX_FORWARD(input,output) \ { \ const int len0 = GETLENGTH(*(input)) / GETLENGTH(*(output)); \ const int len1 = GETLENGTH(**(input)) / GETLENGTH(**(output)); \ FOREACH(i, GETLENGTH(output)) \ FOREACH(o0, GETLENGTH(*(output))) \ FOREACH(o1, GETLENGTH(**(output))) \ { \ int x0 = 0, x1 = 0, ismax; \ FOREACH(l0, len0) \ FOREACH(l1, len1) \ { \ ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\ x0 += ismax * (l0 - x0); \ x1 += ismax * (l1 - x1); \ } \ output[i][o0][o1] = input[i][o0*len0 + x0][o1*len1 + x1]; \ } \ } #define SUBSAMP_MAX_BACKWARD(input,inerror,outerror) \ { \ const int len0 = GETLENGTH(*(inerror)) / GETLENGTH(*(outerror)); \ const int len1 = GETLENGTH(**(inerror)) / GETLENGTH(**(outerror)); \ FOREACH(i, GETLENGTH(outerror)) \ FOREACH(o0, GETLENGTH(*(outerror))) \ FOREACH(o1, GETLENGTH(**(outerror))) \ { \ int x0 = 0, x1 = 0, ismax; \ FOREACH(l0, len0) \ FOREACH(l1, len1) \ { \ ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\ x0 += ismax * (l0 - x0); \ x1 += ismax * (l1 - x1); \ } \ inerror[i][o0*len0 + x0][o1*len1 + x1] = outerror[i][o0][o1]; \ } \ } #define DOT_PRODUCT_FORWARD(input,output,weight,bias,action) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ ((double *)output)[y] += ((double *)input)[x] * weight[x][y]; \ FOREACH(j, GETLENGTH(bias)) \ ((double *)output)[j] = action(((double *)output)[j] + bias[j]); \ } #define DOT_PRODUCT_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ ((double *)inerror)[x] += ((double *)outerror)[y] * weight[x][y]; \ FOREACH(i, GETCOUNT(inerror)) \ ((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \ FOREACH(j, 
GETLENGTH(outerror)) \ bd[j] += ((double *)outerror)[j]; \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ wd[x][y] += ((double *)input)[x] * ((double *)outerror)[y]; \ } double relu(double x) { return x*(x > 0); } double relugrad(double y) { return y > 0; } static void forward(LeNet5 *lenet, Feature *features, double(*action)(double)) { CONVOLUTION_FORWARD(features->input, features->layer1, lenet->weight0_1, lenet->bias0_1, action); SUBSAMP_MAX_FORWARD(features->layer1, features->layer2); CONVOLUTION_FORWARD(features->layer2, features->layer3, lenet->weight2_3, lenet->bias2_3, action); SUBSAMP_MAX_FORWARD(features->layer3, features->layer4); CONVOLUTION_FORWARD(features->layer4, features->layer5, lenet->weight4_5, lenet->bias4_5, action); DOT_PRODUCT_FORWARD(features->layer5, features->output, lenet->weight5_6, lenet->bias5_6, action); } static void backward(LeNet5 *lenet, LeNet5 *deltas, Feature *errors, Feature *features, double(*actiongrad)(double)) { DOT_PRODUCT_BACKWARD(features->layer5, errors->layer5, errors->output, lenet->weight5_6, deltas->weight5_6, deltas->bias5_6, actiongrad); CONVOLUTION_BACKWARD(features->layer4, errors->layer4, errors->layer5, lenet->weight4_5, deltas->weight4_5, deltas->bias4_5, actiongrad); SUBSAMP_MAX_BACKWARD(features->layer3, errors->layer3, errors->layer4); CONVOLUTION_BACKWARD(features->layer2, errors->layer2, errors->layer3, lenet->weight2_3, deltas->weight2_3, deltas->bias2_3, actiongrad); SUBSAMP_MAX_BACKWARD(features->layer1, errors->layer1, errors->layer2); CONVOLUTION_BACKWARD(features->input, errors->input, errors->layer1, lenet->weight0_1, deltas->weight0_1, deltas->bias0_1, actiongrad); } static inline void load_input(Feature *features, image input) { double (*layer0)[LENGTH_FEATURE0][LENGTH_FEATURE0] = features->input; const long sz = sizeof(image) / sizeof(**input); double mean = 0, std = 0; FOREACH(j, sizeof(image) / sizeof(*input)) FOREACH(k, sizeof(*input) / sizeof(**input)) { mean += input[j][k]; std += input[j][k] * input[j][k]; } mean /= sz; std = sqrt(std / sz - mean*mean); FOREACH(j, sizeof(image) / sizeof(*input)) FOREACH(k, sizeof(*input) / sizeof(**input)) { layer0[0][j + PADDING][k + PADDING] = (input[j][k] - mean) / std; } } static inline void softmax(double input[OUTPUT], double loss[OUTPUT], int label, int count) { double inner = 0; for (int i = 0; i < count; ++i) { double res = 0; for (int j = 0; j < count; ++j) { res += exp(input[j] - input[i]); } loss[i] = 1. 
/ res; inner -= loss[i] * loss[i]; } inner += loss[label]; for (int i = 0; i < count; ++i) { loss[i] *= (i == label) - loss[i] - inner; } } static void load_target(Feature *features, Feature *errors, int label) { double *output = (double *)features->output; double *error = (double *)errors->output; softmax(output, error, label, GETCOUNT(features->output)); } static uint8 get_result(Feature *features, uint8 count) { double *output = (double *)features->output; const int outlen = GETCOUNT(features->output); uint8 result = 0; printf("0: %7.6f ", output[0]); double maxvalue = *output; for (uint8 i = 1; i < count; ++i) { if (output[i] > maxvalue) { maxvalue = output[i]; result = i; } printf("%u: %7.6f ", i, output[i]); } printf(" \nprediction: %u\n", result); return result; } static double f64rand() { static int randbit = 0; if (!randbit) { srand((unsigned)time(0)); for (int i = RAND_MAX; i; i >>= 1, ++randbit); } unsigned long long lvalue = 0x4000000000000000L; int i = 52 - randbit; for (; i > 0; i -= randbit) lvalue |= (unsigned long long)rand() << i; lvalue |= (unsigned long long)rand() >> -i; return *(double *)&lvalue - 3; } void TrainBatch(LeNet5 *lenet, image *inputs, uint8 *labels, int batchSize) { double buffer[GETCOUNT(LeNet5)] = { 0 }; int i = 0; #pragma omp parallel for for (i = 0; i < batchSize; ++i) { Feature features = { 0 }; Feature errors = { 0 }; LeNet5 deltas = { 0 }; load_input(&features, inputs[i]); forward(lenet, &features, relu); load_target(&features, &errors, labels[i]); backward(lenet, &deltas, &errors, &features, relugrad); #pragma omp critical { FOREACH(j, GETCOUNT(LeNet5)) buffer[j] += ((double *)&deltas)[j]; } } double k = ALPHA / batchSize; FOREACH(i, GETCOUNT(LeNet5)) ((double *)lenet)[i] += k * buffer[i]; } void Train(LeNet5 *lenet, image input, uint8 label) { Feature features = { 0 }; Feature errors = { 0 }; LeNet5 deltas = { 0 }; load_input(&features, input); forward(lenet, &features, relu); load_target(&features, &errors, label); backward(lenet, &deltas, &errors, &features, relugrad); FOREACH(i, GETCOUNT(LeNet5)) ((double *)lenet)[i] += ALPHA * ((double *)&deltas)[i]; } uint8 Predict(LeNet5 *lenet, image input,uint8 count) { Feature features = { 0 }; load_input(&features, input); forward(lenet, &features, relu); return get_result(&features, count); } void Initial(LeNet5 *lenet) { for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->bias0_1; *pos++ = f64rand()); for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->weight2_3; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (INPUT + LAYER1)))); for (double *pos = (double *)lenet->weight2_3; pos < (double *)lenet->weight4_5; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER2 + LAYER3)))); for (double *pos = (double *)lenet->weight4_5; pos < (double *)lenet->weight5_6; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER4 + LAYER5)))); for (double *pos = (double *)lenet->weight5_6; pos < (double *)lenet->bias0_1; *pos++ *= sqrt(6.0 / (LAYER5 + OUTPUT))); for (int *pos = (int *)lenet->bias0_1; pos < (int *)(lenet + 1); *pos++ = 0); }
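// Note: TrainBatch above merges every per-sample gradient through a single
// "omp critical" block, which serializes the most expensive part of the
// batch update. Since OpenMP 4.5 the same sum can be expressed as an
// array-section reduction: each thread accumulates into a private copy of
// the buffer and the runtime combines them once per thread. Minimal
// standalone sketch of the pattern (names are ours, not part of lenet.h):
static void accumulate_deltas_sketch(double *buffer, const double *per_item,
                                     int n_items, int n_params)
{
    // per_item holds n_items gradient vectors of n_params doubles each
    #pragma omp parallel for reduction(+ : buffer[:n_params])
    for (int i = 0; i < n_items; ++i)
        for (int j = 0; j < n_params; ++j)
            buffer[j] += per_item[(long) i * n_params + j];
}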
GB_unaryop__ainv_fp64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_fp64_fp32 // op(A') function: GB_tran__ainv_fp64_fp32 // C type: double // A type: float // cast: double cij = (double) aij // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_fp64_fp32 ( double *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_fp64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
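//------------------------------------------------------------------------------
// For reference: with the macro definitions above, one iteration of the apply
// loop, GB_CAST_OP (p, p), expands to
//
//      float aij = Ax [p] ;        // GB_GETA: read the A entry
//      double x = (double) aij ;   // GB_CASTING: typecast float to double
//      Cx [p] = -x ;               // GB_OP: additive inverse
//------------------------------------------------------------------------------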
rwpng.c
/*---------------------------------------------------------------------------
 pngquant: RGBA -> RGBA-palette quantization program rwpng.c
---------------------------------------------------------------------------
 © 1998-2000 by Greg Roelofs.
 © 2009-2014 by Kornel Lesiński.
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "png.h"
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif

#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif

#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif

static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg);
int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr);

void rwpng_version_info(FILE *fp)
{
    const char *pngver = png_get_header_ver(NULL);

#if USE_COCOA
    fprintf(fp, " Using libpng %s and Apple Cocoa image reader.\n", pngver);
#elif USE_LCMS
    fprintf(fp, " Using libpng %s with Little CMS color profile support.\n", pngver);
#else
    fprintf(fp, " Using libpng %s.\n", pngver);
#endif

#if PNG_LIBPNG_VER < 10600
    if (strcmp(pngver, "1.3.") < 0) {
        fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
              "Please recompile pngquant with a newer version of libpng (1.5 or later).\n", fp);
    }
#endif
}

struct rwpng_read_data {
    FILE *const fp;
    png_size_t bytes_read;
};

static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);

    png_size_t read = fread(data, 1, length, read_data->fp);
    if (!read) {
        png_error(png_ptr, "Read error");
    }
    read_data->bytes_read += read;
}

struct rwpng_write_state {
    FILE *outfile;
    png_size_t maximum_file_size;
    png_size_t bytes_written;
    pngquant_error retval;
};

static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);

    if (SUCCESS !=
write_state->retval) { return; } if (write_state->maximum_file_size && write_state->bytes_written + length > write_state->maximum_file_size) { write_state->retval = TOO_LARGE_FILE; } if (!fwrite(data, 1, length, write_state->outfile)) { write_state->retval = CANT_WRITE_ERROR; } write_state->bytes_written += length; } static void user_flush_data(png_structp png_ptr) { // libpng never calls this :( } static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, unsigned int rowbytes) { if (!rowbytes) { rowbytes = png_get_rowbytes(png_ptr, info_ptr); } png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0])); if (!row_pointers) return NULL; for(unsigned int row = 0; row < height; ++row) { row_pointers[row] = base + row * rowbytes; } return row_pointers; } static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk) { if (0 == memcmp("iCCP", in_chunk->name, 5) || 0 == memcmp("cHRM", in_chunk->name, 5) || 0 == memcmp("gAMA", in_chunk->name, 5)) { return 0; // not handled } struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr); struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk)); memcpy(chunk->name, in_chunk->name, 5); chunk->size = in_chunk->size; chunk->location = in_chunk->location; chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL; if (in_chunk->size) { memcpy(chunk->data, in_chunk->data, in_chunk->size); } chunk->next = *head; *head = chunk; return 1; // marks as "handled", libpng won't store it } /* retval: 0 = success 21 = bad sig 22 = bad IHDR 24 = insufficient memory 25 = libpng error (via longjmp()) 26 = wrong PNG color type (no alpha channel) */ pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose) { png_structp png_ptr = NULL; png_infop info_ptr = NULL; png_size_t rowbytes; int color_type, bit_depth; png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, verbose ? 
rwpng_warning_stderr_handler : rwpng_warning_silent_handler); if (!png_ptr) { return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, NULL, NULL); return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } /* setjmp() must be called in every function that calls a non-trivial * libpng function */ if (setjmp(mainprog_ptr->jmpbuf)) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */ } png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback); struct rwpng_read_data read_data = {infile, 0}; png_set_read_fn(png_ptr, &read_data, user_read_data); png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */ /* alternatively, could make separate calls to png_get_image_width(), * etc., but want bit_depth and color_type for later [don't care about * compression_type and filter_type => NULLs] */ png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height, &bit_depth, &color_type, NULL, NULL, NULL); /* expand palette images to RGB, low-bit-depth grayscale images to 8 bits, * transparency chunks to full alpha channel; strip 16-bit-per-sample * images to 8 bits per sample; and convert grayscale to RGB[A] */ /* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */ if (!(color_type & PNG_COLOR_MASK_ALPHA)) { #ifdef PNG_READ_FILLER_SUPPORTED png_set_expand(png_ptr); png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER); #else fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->retval = 26; return mainprog_ptr->retval; #endif } if (bit_depth == 16) { png_set_strip_16(png_ptr); } if (!(color_type & PNG_COLOR_MASK_COLOR)) { png_set_gray_to_rgb(png_ptr); } /* get source gamma for gamma correction, or use sRGB default */ double gamma = 0.45455; if (!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) { png_get_gAMA(png_ptr, info_ptr, &gamma); } mainprog_ptr->gamma = gamma; png_set_interlace_handling(png_ptr); /* all transformations have been registered; now update info_ptr data, * get rowbytes and channels, and allocate image memory */ png_read_update_info(png_ptr, info_ptr); rowbytes = png_get_rowbytes(png_ptr, info_ptr); if ((mainprog_ptr->rgba_data = malloc(rowbytes*mainprog_ptr->height)) == NULL) { fprintf(stderr, "pngquant readpng: unable to allocate image data\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return PNG_OUT_OF_MEMORY_ERROR; } png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0); /* now we can go ahead and just read the whole image */ png_read_image(png_ptr, row_pointers); /* and we're done! (png_read_end() can be omitted if no processing of * post-IDAT text/time/etc. 
is desired) */ png_read_end(png_ptr, NULL); #if USE_LCMS #if PNG_LIBPNG_VER < 10500 png_charp ProfileData; #else png_bytep ProfileData; #endif png_uint_32 ProfileLen; cmsHPROFILE hInProfile = NULL; /* color_type is read from the image before conversion to RGBA */ int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR; mainprog_ptr->lcms_status = NONE; /* embedded ICC profile */ if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) { hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen); cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile); /* only RGB (and GRAY) valid for PNGs */ if (colorspace == cmsSigRgbData && COLOR_PNG) { mainprog_ptr->lcms_status = ICCP; } else { if (colorspace == cmsSigGrayData && !COLOR_PNG) { mainprog_ptr->lcms_status = ICCP_WARN_GRAY; } cmsCloseProfile(hInProfile); hInProfile = NULL; } } /* build RGB profile from cHRM and gAMA */ if (hInProfile == NULL && COLOR_PNG && !png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) && png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) && png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) { cmsCIExyY WhitePoint; cmsCIExyYTRIPLE Primaries; png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y, &Primaries.Red.x, &Primaries.Red.y, &Primaries.Green.x, &Primaries.Green.y, &Primaries.Blue.x, &Primaries.Blue.y); WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0; cmsToneCurve *GammaTable[3]; GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma); hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable); cmsFreeToneCurve(GammaTable[0]); mainprog_ptr->lcms_status = GAMA_CHRM; } /* transform image to sRGB colorspace */ if (hInProfile != NULL) { cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile(); cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8, hOutProfile, TYPE_RGBA_8, INTENT_PERCEPTUAL, omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0); #pragma omp parallel for \ if (mainprog_ptr->height*mainprog_ptr->width > 8000) \ schedule(static) for (unsigned int i = 0; i < mainprog_ptr->height; i++) { /* It is safe to use the same block for input and output, when both are of the same TYPE. 
*/ cmsDoTransform(hTransform, row_pointers[i], row_pointers[i], mainprog_ptr->width); } cmsDeleteTransform(hTransform); cmsCloseProfile(hOutProfile); cmsCloseProfile(hInProfile); mainprog_ptr->gamma = 0.45455; } #endif png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->file_size = read_data.bytes_read; mainprog_ptr->row_pointers = (unsigned char **)row_pointers; return SUCCESS; } static void rwpng_free_chunks(struct rwpng_chunk *chunk) { if (!chunk) return; rwpng_free_chunks(chunk->next); free(chunk->data); free(chunk); } void rwpng_free_image24(png24_image *image) { free(image->row_pointers); image->row_pointers = NULL; free(image->rgba_data); image->rgba_data = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } void rwpng_free_image8(png8_image *image) { free(image->indexed_data); image->indexed_data = NULL; free(image->row_pointers); image->row_pointers = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose) { #if USE_COCOA return rwpng_read_image24_cocoa(infile, input_image_p); #else return rwpng_read_image24_libpng(infile, input_image_p, verbose); #endif } static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression) { /* could also replace libpng warning-handler (final NULL), but no need: */ *png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL); if (!(*png_ptr_p)) { return LIBPNG_INIT_ERROR; /* out of memory */ } *info_ptr_p = png_create_info_struct(*png_ptr_p); if (!(*info_ptr_p)) { png_destroy_write_struct(png_ptr_p, NULL); return LIBPNG_INIT_ERROR; /* out of memory */ } /* setjmp() must be called in every function that calls a PNG-writing * libpng function, unless an alternate error handler was installed-- * but compatible error handlers must either use longjmp() themselves * (as in this program) or exit immediately, so here we go: */ if (setjmp(mainprog_ptr->jmpbuf)) { png_destroy_write_struct(png_ptr_p, info_ptr_p); return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */ } png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION); png_set_compression_mem_level(*png_ptr_p, fast_compression ? 
9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better return SUCCESS; } void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers) { png_write_info(*png_ptr_p, *info_ptr_p); png_set_packing(*png_ptr_p); png_write_image(*png_ptr_p, row_pointers); png_write_end(*png_ptr_p, NULL); png_destroy_write_struct(png_ptr_p, info_ptr_p); } void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma) { /* remap sets gamma to 0.45455 */ png_set_gAMA(png_ptr, info_ptr, gamma); png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual } pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr) { png_structp png_ptr; png_infop info_ptr; pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression); if (retval) return retval; struct rwpng_write_state write_state; write_state = (struct rwpng_write_state){ .outfile = outfile, .maximum_file_size = mainprog_ptr->maximum_file_size, .retval = SUCCESS, }; png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data); // Palette images generally don't gain anything from filtering png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE); rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma); /* set the image parameters appropriately */ int sample_depth; #if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */ if (mainprog_ptr->num_palette <= 2) sample_depth = 1; else if (mainprog_ptr->num_palette <= 4) sample_depth = 2; else if (mainprog_ptr->num_palette <= 16) sample_depth = 4; else #endif sample_depth = 8; struct rwpng_chunk *chunk = mainprog_ptr->chunks; int chunk_num=0; while(chunk) { png_unknown_chunk pngchunk = { .size = chunk->size, .data = chunk->data, .location = chunk->location, }; memcpy(pngchunk.name, chunk->name, 5); png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1); #if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600 png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? 
pngchunk.location : PNG_HAVE_IHDR); #endif chunk = chunk->next; chunk_num++; } png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, sample_depth, PNG_COLOR_TYPE_PALETTE, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_set_PLTE(png_ptr, info_ptr, &mainprog_ptr->palette[0], mainprog_ptr->num_palette); if (mainprog_ptr->num_trans > 0) { png_set_tRNS(png_ptr, info_ptr, mainprog_ptr->trans, mainprog_ptr->num_trans, NULL); } rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers); return write_state.retval; } pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr, int filter) { png_structp png_ptr; png_infop info_ptr; pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0); if (retval) return retval; png_init_io(png_ptr, outfile); rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma); png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, 8, PNG_COLOR_TYPE_RGB_ALPHA, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, filter); png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0); rwpng_write_end(&info_ptr, &png_ptr, row_pointers); free(row_pointers); return SUCCESS; } static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) { fprintf(stderr, " %s\n", msg); } static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) { } static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg) { rwpng_png_image *mainprog_ptr; /* This function, aside from the extra step of retrieving the "error * pointer" (below) and the fact that it exists within the application * rather than within libpng, is essentially identical to libpng's * default error handler. The second point is critical: since both * setjmp() and longjmp() are called from the same code, they are * guaranteed to have compatible notions of how big a jmp_buf is, * regardless of whether _BSD_SOURCE or anything else has (or has not) * been defined. */ fprintf(stderr, " error: %s\n", msg); fflush(stderr); mainprog_ptr = png_get_error_ptr(png_ptr); if (mainprog_ptr == NULL) abort(); longjmp(mainprog_ptr->jmpbuf, 1); }
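/* For reference, a minimal round-trip sketch using the readers/writers above:
   decode a 32-bit RGBA PNG and re-encode it unquantized. The function and file
   names are hypothetical, READ_ERROR is assumed to be among the pngquant_error
   codes declared in rwpng.h, and error handling is kept minimal. Disabled with
   #if 0 so it cannot affect a build. */
#if 0
static pngquant_error recompress24(const char *inpath, const char *outpath)
{
    png24_image img;
    memset(&img, 0, sizeof(img));               /* zero jmpbuf, chunks, pointers */

    FILE *in = fopen(inpath, "rb");
    if (!in) return READ_ERROR;
    pngquant_error err = rwpng_read_image24(in, &img, 0 /* silent warnings */);
    fclose(in);
    if (err != SUCCESS) return err;

    FILE *out = fopen(outpath, "wb");
    if (out) {
        err = rwpng_write_image24(out, &img, PNG_ALL_FILTERS);
        fclose(out);
    } else {
        err = CANT_WRITE_ERROR;
    }
    rwpng_free_image24(&img);
    return err;
}
#endif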
memdbg.h
/* ****** NOTE ******
 * This header file should be the LAST header file included within every
 * .c file within the project. If there are .h files that have actual
 * code in them, then this header should be the last include within that
 * .h file, and that .h file should be the last one included within the
 * .c file.
 * ****** NOTE *****
 */

#if !defined (__MEM_DBG_H_)
#define __MEM_DBG_H_

// values to use within the MemDbg_Validate() function.
#define MEMDBG_VALIDATE_MIN     0
#define MEMDBG_VALIDATE_DEEP    1
#define MEMDBG_VALIDATE_DEEPER  2
#define MEMDBG_VALIDATE_DEEPEST 3

#include <stdio.h>
#include <stdlib.h>
#include "os.h"
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include "memory.h"

#if defined (MEMDBG_ON)

/*
 * This software was written by Jim Fougeron jfoug AT cox dot net
 * in 2013. No copyright is claimed, and the software is hereby
 * placed in the public domain. In case this attempt to disclaim
 * copyright and place the software in the public domain is deemed
 * null and void, then the software is Copyright (c) 2013 Jim Fougeron
 * and it is hereby released to the general public under the following
 * terms:
 *
 * This software may be modified, redistributed, and used for any
 * purpose, in source and binary forms, with or without modification.
 */

/*
 * memdbg.h
 * Memory management debugging (at runtime)
 *
 * memdbg contains routines to detect and report memory
 * problems, such as double frees, passing bad pointers to
 * free, and most buffer overwrites. Tracking of non-freed
 * data can also be enabled, to show memory leaks.
 *
 * Compilation Options (provided from Makefile CFLAGS)
 *
 * MEMDBG_ON     If this is NOT defined, then memdbg will
 *               get out of your way, and most normal memory functions
 *               will be called with no overhead at all.
 */

/* these functions can be called by client code. Normally MemDbg_Used() and
 * MemDbg_Display() would be called at program exit. That will dump a list
 * of any memory that was not released. MemDbg_Validate() can be called
 * pretty much any time. That function will walk the memory allocation linked
 * lists, and squawk if there are problems, such as overwrites, freed memory that
 * has been written to, etc. It would likely be good to call MemDbg_Validate()
 * within benchmarking, after every format is tested.
 *
 * TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display()
 * and a function to get the 'current' state of memory as a handle. Thus, a
 * format self test could get a handle BEFORE starting, and then check after, and
 * ONLY show leaked memory from the time the handle was obtained, which was at the
 * start of the self test. Thus it would only show leaks from that format test.
 *
 * These functions are NOT thread safe. Do not call them within OMP blocks of code.
 * Normally, these would be called at program exit, or within things like format
 * self test code, etc, and not within OMP. But this warning is here, so that
 * it is known NOT to call within OMP.
 */
extern size_t MemDbg_Used(int show_freed);
extern void MemDbg_Display(FILE *);
extern void MemDbg_Validate(int level);
extern void MemDbg_Validate_msg(int level, const char *pMsg);
extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData);

/* these functions should almost NEVER be called by any client code. They
 * are listed here, because the macros need to know their names.
 * Client code should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc().
 * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this
 * function would not be declared here, AND at link time, the function would
 * not be found.
 * NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic)
 * also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is
 * very slow, and the thread safety required makes it even slower. This applies not only
 * to these functions here, BUT to malloc/free in general in OMP blocks. AVOID doing that
 * at almost all costs, and performance will usually go up.
 */
extern void *MEMDBG_alloc(size_t, char *, int);
extern void *MEMDBG_alloc_align(size_t, int, char *, int);
extern void *MEMDBG_calloc(size_t count, size_t, char *, int);
extern void *MEMDBG_realloc(void *, size_t, char *, int);
extern void MEMDBG_free(const void *, char *, int);
extern char *MEMDBG_strdup(const char *, char *, int);

#if !defined(__MEMDBG__)
/* we get here on every file compiled EXCEPT memdbg.c */
#undef malloc
#undef realloc
#undef free
#undef strdup
#undef libc_free
#undef libc_calloc
#undef libc_malloc
#define libc_free(a)     MEMDBG_libc_free(a)
#define libc_malloc(a)   MEMDBG_libc_alloc(a)
#define libc_calloc(a,b) MEMDBG_libc_calloc(a,b)
#define malloc(a)        MEMDBG_alloc((a),__FILE__,__LINE__)
#define calloc(a,b)      MEMDBG_calloc(a,b,__FILE__,__LINE__)
#define realloc(a,b)     MEMDBG_realloc((a),(b),__FILE__,__LINE__)
#define free(a)          MEMDBG_free((a),__FILE__,__LINE__)
#define strdup(a)        MEMDBG_strdup((a),__FILE__,__LINE__)
#endif /* !defined __MEMDBG__ */

/* pass the file handle to write to (normally stderr) */
#define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \
    if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \
    MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0)

typedef struct MEMDBG_HANDLE_t {
    unsigned id;
    unsigned alloc_cnt;
    size_t mem_size;
} MEMDBG_HANDLE;

/*
 * these functions give a caller some of the INSIDE information about the
 * allocated object. We simply return data from inside the memdbg header.
 * NOTE, if the fence post is not valid, we still return something, BUT will
 * also return something in the err_msg stating this may not be valid.
 */

/* The count 'id' of an allocated block. Same as used in leak report */
unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg);
/* the size allocated of the contained block */
size_t MEMDBG_get_size(const void *ptr, const char **err_msg);
/* what file (source) did the allocation */
const char *MEMDBG_get_file(const void *ptr, const char **err_msg);
/* what file (source) line number did the allocation */
unsigned MEMDBG_get_line(const void *ptr, const char **err_msg);

/*
 * these functions allow taking a memory snapshot, calling some code, then validating that memory
 * is the same after the code. This will help catch memory leaks and other such problems, within
 * formats and such. Simply get the snapshot, run self tests (or other), and when it exits, check
 * the snapshot to make sure nothing leaked.
 */
/* returning a struct (or passing it as a param) is not super efficient, but this is
   done so infrequently that it is not an issue. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id);
/* will not exit on leaks. Does exit on memory overwrite corruption. */
void MEMDBG_checkSnapshot(MEMDBG_HANDLE);
/* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found.
 */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks);

/*
 * the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate
 * so it is not flagged as a leak by these HANDLE snapshot functions. 'tiny' memory is expected
 * to leak, until program exit. At that time, any that was not freed will be shown as leaked.
 * THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe.
 */
void MEMDBG_tag_mem_from_alloc_tiny(void *);

extern void MEMDBG_libc_free(void *);
extern void *MEMDBG_libc_alloc(size_t size);
extern void *MEMDBG_libc_calloc(size_t count, size_t size);

#else

#define libc_alloc  malloc
#define libc_calloc calloc
#define libc_malloc malloc
#define libc_free   free
#define MemDbg_Used(a) 0
#define MemDbg_Display(a)
#define MemDbg_Validate(a)
#define MemDbg_Validate_msg(a,b)
#define MemDbg_Validate_msg2(a,b,c)
#define MEMDBG_PROGRAM_EXIT_CHECKS(a)
#define MEMDBG_tag_mem_from_alloc_tiny(a)
#define MEMDBG_HANDLE int
#define MEMDBG_getSnapshot(a) 0
#define MEMDBG_checkSnapshot(a) if(a) printf(" \b")
#define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b")

#endif /* MEMDBG_ON */

#endif /* __MEMDBG_H_ */
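/* For reference, a minimal sketch of the snapshot workflow described above,
 * wrapped around one format's self test. run_one_self_test() is a hypothetical
 * caller-supplied function; everything else is declared in this header. When
 * MEMDBG_ON is not defined, the calls below compile away to no-ops. */
#if 0
static void self_test_with_leak_check(void)
{
    MEMDBG_HANDLE h = MEMDBG_getSnapshot(0);
    run_one_self_test();    /* hypothetical: the code being checked for leaks */
    /* report leaks made since the snapshot, but do not exit on them (0) */
    MEMDBG_checkSnapshot_possible_exit_on_error(h, 0);
}
#endif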
wendy.c
/* wendy.c: One time-step of a one-dimensional N-body code */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <wendy.h>
#include <bst.h>
#include <parallel_sort.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_wtime() (clock()*1./CLOCKS_PER_SEC)
#endif
// functionality to perform argsort in approximate solver
#define SORT_NAME argsort
#define SORT_TYPE struct array_w_index
#define SORT_CMP(x,y) ( ( ((x).val) < ((y).val) ) ? -1: ( ( ((x).val) > ((y).val) ) ? 1: 0 ) )
#include <sort.h>
// For parallel_sort, need standard (qsort-like) comparison function
int argsort_compare_function(const void *a,const void *b) {
  struct array_w_index *x = (struct array_w_index *) a;
  struct array_w_index *y = (struct array_w_index *) b;
  if (x->val < y->val) return -1;
  else if (x->val > y->val) return 1;
  else return 0;
}
double _solve_coll_quad(double c0, double c1, double c2){
  // Solves for collisions under quadratic motion: a t^2/2 + vt + x
  double mba;
  mba= -c1/c2;
  return 0.5 * ( mba + sqrt ( mba * mba - 4. * c0/c2) );
}
double _solve_coll_harm(double c0, double c1, double c2, double omega){
  // Solves for collisions under harmonic motion: A cos(omega*t+phi) + a/omega^2
  double A, B, out;
  A= c0-c2;
  B= c1/omega;
  out= -asin(c2/sqrt(B*B+A*A))-atan2(A,B);
  if ( out < 0 ) {
    out= M_PI+asin(c2/sqrt(B*B+A*A))-atan2(A,B);
  }
  //printf("Solves equation22? %g,%g,%g,%g\n",A*cos(out),B*sin(out),C,
  //       A * cos(out) + B * sin(out) - C);
  //fflush(stdout);
  return out/omega;
}
void _wendy_nbody_onestep(int N, double * x, double * v, double * a,
                          double * m, int * sindx, int * cindx,
                          double * next_tcoll, double * tcoll,
                          double dt, int maxcoll, int * err, int * ncoll,
                          double * time_elapsed){
  int cnt_coll,ii, tmpi;
  int c_in_x_indx, c_in_x_next_indx;
  double dv,tdt, dm, tmpd;
  struct node* minNode;
  double * t= (double *) malloc ( N * sizeof(double) );
  for (ii=0; ii < N; ii++) *(t+ii)= 0.;
  cnt_coll= 0;
  // Build binary search tree for keeping track of collision times
  int * idx= (int *) malloc ( (N-1) * sizeof(int) );
  for (ii=0; ii < N-1; ii++) *(idx+ii)= ii;
  struct node* bst_tcoll= bst_build(N-1,idx,tcoll);
  free(idx);
  //bst_inorder(bst_tcoll);
  // Time how long the loop takes
  clock_t time_begin= clock();
  while ( *next_tcoll < dt && cnt_coll < maxcoll ){
    //printf("Colliding in %f\n",*next_tcoll);
    //fflush(stdout);
    cnt_coll+= 1;
    // collide, update collided particles
    c_in_x_indx= *(sindx+ *cindx);
    c_in_x_next_indx= *(sindx+ *cindx+1);
    tdt= ( *next_tcoll - *(t + c_in_x_indx) );
    dv= *(a + c_in_x_indx) * tdt;
    *(x + c_in_x_indx)+= dv * tdt / 2. + *(v + c_in_x_indx) * tdt;
    *(v + c_in_x_indx)+= dv;
    *(t + c_in_x_indx)= *next_tcoll;
    tdt= ( *next_tcoll - *(t + c_in_x_next_indx) );
    dv= *(a + c_in_x_next_indx) * tdt;
    *(x + c_in_x_next_indx)+= dv * tdt / 2.
+ *(v + c_in_x_next_indx) * tdt; *(v + c_in_x_next_indx)+= dv; *(t + c_in_x_next_indx)= *next_tcoll; // swap tmpi= *(sindx+ *cindx); *(sindx+ *cindx)= *(sindx+ *cindx+1); *(sindx+ *cindx+1)= tmpi; // track mass and update accelerations dm= *(m + c_in_x_next_indx) - *(m + c_in_x_indx); *(a + c_in_x_indx)-= dm; *(a + c_in_x_next_indx)-= dm; tmpd= *(a + c_in_x_indx); *(a + c_in_x_indx)= *(a + c_in_x_next_indx); *(a + c_in_x_next_indx)= tmpd; // Update collision times, solution for delta x = 0 //printf("Collide in %f\n",2.* (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) / // (*(a + c_in_x_next_indx) - *(a + c_in_x_indx))); //fflush(stdout); tmpd= 2.* (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) / (*(a + c_in_x_next_indx) - *(a + c_in_x_indx)); bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx); *(tcoll + *cindx)= *next_tcoll+tmpd; bst_tcoll= bst_forceInsert(bst_tcoll,*cindx,tcoll+*cindx); // Abuse the c_in_x_indx and c_in_x_next_indx arrays if ( *cindx > 0 ){ c_in_x_indx= *(sindx+ *cindx-1); c_in_x_next_indx= *(sindx+ *cindx); tdt= *(t + c_in_x_indx) - *next_tcoll; bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx-1); *(tcoll + *cindx -1)= *next_tcoll + _solve_coll_quad(*(x + c_in_x_indx) + *(a+c_in_x_indx) * tdt*tdt / 2. - *(v+c_in_x_indx) * tdt - *(x + c_in_x_next_indx), *(v + c_in_x_indx) - *(a + c_in_x_indx) * tdt - *(v + c_in_x_next_indx), 0.5*(*(a + c_in_x_indx) - *(a + c_in_x_next_indx))); bst_tcoll= bst_forceInsert(bst_tcoll,*cindx-1,tcoll+*cindx-1); } if ( *cindx < N-2 ){ c_in_x_indx= *(sindx+ *cindx+2); c_in_x_next_indx= *(sindx+ *cindx+1); tdt= *(t + c_in_x_indx) - *next_tcoll; bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx+1); *(tcoll + *cindx+1)= *next_tcoll + _solve_coll_quad(*(x + c_in_x_indx) + *(a+c_in_x_indx) * tdt*tdt / 2. - *(v+c_in_x_indx) * tdt - *(x + c_in_x_next_indx), *(v + c_in_x_indx) - *(a + c_in_x_indx) * tdt - *(v + c_in_x_next_indx), 0.5*(*(a + c_in_x_indx) - *(a + c_in_x_next_indx))); bst_tcoll= bst_forceInsert(bst_tcoll,*cindx+1,tcoll+*cindx+1); } // Find minimum minNode= bst_minValueNode(bst_tcoll); *cindx= minNode->idx; *next_tcoll= *minNode->val; //printf("Next one %f\n",*next_tcoll); //fflush(stdout); } clock_t time_end= clock(); *time_elapsed= (double) (time_end-time_begin) / CLOCKS_PER_SEC; //printf("Next %f\n",*next_tcoll-dt); //fflush(stdout); // Update all to next snapshot for (ii=0; ii < N; ii++) { tdt= dt - *(t+ii); dv= *(a+ii) * tdt; *(x+ii)+= dv * tdt / 2. 
+ *(v+ii) * tdt; *(v+ii)+= dv; } for (ii=0; ii < N-1; ii++) { *(tcoll+ii)-= dt; } *next_tcoll-= dt; free(t); bst_destroy(bst_tcoll); *ncoll= cnt_coll; if ( cnt_coll == maxcoll ) *err= -2; } void _wendy_nbody_harm_onestep(int N, double * x, double * v, double * a, double * m, int * sindx, int * cindx, double * next_tcoll, double * tcoll,double dt, int maxcoll, int * err, int * ncoll,double * time_elapsed, double omega){ int cnt_coll,ii, tmpi; int c_in_x_indx, c_in_x_next_indx; double tdt, dm, tmpd, cosot, sinot; struct node* minNode; double * t= (double *) malloc ( N * sizeof(double) ); for (ii=0; ii < N; ii++) *(t+ii)= 0.; cnt_coll= 0; // Build binary search tree for keeping track of collision times int * idx= (int *) malloc ( (N-1) * sizeof(int) ); for (ii=0; ii < N-1; ii++) *(idx+ii)= ii; struct node* bst_tcoll= bst_build(N-1,idx,tcoll); free(idx); //bst_inorder(bst_tcoll); // Time how long the loop takes clock_t time_begin= clock(); while ( *next_tcoll < dt && cnt_coll < maxcoll ){ //printf("Colliding in %f\n",*next_tcoll); //fflush(stdout); cnt_coll+= 1; // collide, update collided particles c_in_x_indx= *(sindx+ *cindx); c_in_x_next_indx= *(sindx+ *cindx+1); tdt= ( *next_tcoll - *(t + c_in_x_indx) ); sinot = sin( omega * tdt ); cosot= sqrt(1.-sinot*sinot); tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx); *(x + c_in_x_indx)= tmpd * cosot \ + *(v + c_in_x_indx) / omega * sinot \ + *(a + c_in_x_indx); *(v + c_in_x_indx)= -tmpd * omega * sinot \ + *(v + c_in_x_indx) * cosot; *(t + c_in_x_indx)= *next_tcoll; tdt= ( *next_tcoll - *(t + c_in_x_next_indx) ); sinot = sin( omega * tdt ); cosot= sqrt(1.-sinot*sinot); tmpd= *(x + c_in_x_next_indx) - *(a + c_in_x_next_indx); *(x + c_in_x_next_indx)= tmpd * cosot \ + *(v + c_in_x_next_indx) / omega * sinot \ + *(a + c_in_x_next_indx) ; *(v + c_in_x_next_indx)= -tmpd * omega * sinot \ + *(v + c_in_x_next_indx) * cosot; *(t + c_in_x_next_indx)= *next_tcoll; //printf("Collide? 
%g, %g, %g\n",*(x + c_in_x_indx),*(x + c_in_x_next_indx), // *(x + c_in_x_indx)-*(x + c_in_x_next_indx)); //fflush(stdout); // swap tmpi= *(sindx+ *cindx); *(sindx+ *cindx)= *(sindx+ *cindx+1); *(sindx+ *cindx+1)= tmpi; // track mass and update accelerations dm= *(m + c_in_x_next_indx) - *(m + c_in_x_indx); *(a + c_in_x_indx)-= dm; *(a + c_in_x_next_indx)-= dm; tmpd= *(a + c_in_x_indx); *(a + c_in_x_indx)= *(a + c_in_x_next_indx); *(a + c_in_x_next_indx)= tmpd; // Update collision times, solution for delta x = 0 tmpd= (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) / \ (*(a + c_in_x_next_indx) - *(a + c_in_x_indx)) / omega; tmpd*= tmpd; tmpd= (1.-tmpd)/(1.+tmpd); tmpd= acos(tmpd)/omega; bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx); *(tcoll + *cindx)= *next_tcoll+tmpd; bst_tcoll= bst_forceInsert(bst_tcoll,*cindx,tcoll+*cindx); //printf("Collide in %f\n",*(tcoll + *cindx)-*next_tcoll); //fflush(stdout); // Abuse the c_in_x_indx and c_in_x_next_indx arrays if ( *cindx > 0 ){ c_in_x_indx= *(sindx+ *cindx-1); c_in_x_next_indx= *(sindx+ *cindx); // Also forward the previous sheet to the collision time for convenience tdt= *next_tcoll - *(t + c_in_x_indx); sinot = sin( omega * tdt ); cosot= sqrt(1.-sinot*sinot); tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx); *(x + c_in_x_indx)= tmpd * cosot \ + *(v + c_in_x_indx) / omega * sinot \ + *(a + c_in_x_indx); *(v + c_in_x_indx)= -tmpd * omega * sinot \ + *(v + c_in_x_indx) * cosot; *(t + c_in_x_indx)= *next_tcoll; bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx-1); *(tcoll + *cindx -1)= *next_tcoll + _solve_coll_harm(*(x + c_in_x_indx) - *(x + c_in_x_next_indx), *(v + c_in_x_indx) - *(v + c_in_x_next_indx), *(a + c_in_x_indx) - *(a + c_in_x_next_indx), omega); bst_tcoll= bst_forceInsert(bst_tcoll,*cindx-1,tcoll+*cindx-1); } if ( *cindx < N-2 ){ c_in_x_indx= *(sindx+ *cindx+2); c_in_x_next_indx= *(sindx+ *cindx+1); // Also forward the next sheet to the collision time for convenience tdt= *next_tcoll - *(t + c_in_x_indx); sinot = sin( omega * tdt ); cosot= sqrt(1.-sinot*sinot); tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx); *(x + c_in_x_indx)= tmpd * cosot \ + *(v + c_in_x_indx) / omega * sinot \ + *(a + c_in_x_indx); *(v + c_in_x_indx)= -tmpd * omega * sinot \ + *(v + c_in_x_indx) * cosot; *(t + c_in_x_indx)= *next_tcoll; bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx+1); *(tcoll + *cindx+1)= *next_tcoll + _solve_coll_harm(*(x + c_in_x_indx) - *(x + c_in_x_next_indx), *(v + c_in_x_indx) - *(v + c_in_x_next_indx), *(a + c_in_x_indx) - *(a + c_in_x_next_indx), omega); bst_tcoll= bst_forceInsert(bst_tcoll,*cindx+1,tcoll+*cindx+1); } // Find minimum minNode= bst_minValueNode(bst_tcoll); *cindx= minNode->idx; *next_tcoll= *minNode->val; //printf("Next one %f\n",*next_tcoll); //fflush(stdout); } clock_t time_end= clock(); *time_elapsed= (double) (time_end-time_begin) / CLOCKS_PER_SEC; //printf("Next %f\n",*next_tcoll-dt); //fflush(stdout); // Update all to next snapshot for (ii=0; ii < N; ii++) { tdt= dt - *(t+ii); sinot = sin( omega * tdt ); cosot= sqrt(1.-sinot*sinot); tmpd= *(x+ii) - *(a+ii); *(x+ii)= tmpd * cosot + *(v+ii) / omega * sinot + *(a+ii); *(v+ii)= -tmpd * omega * sinot + *(v+ii) * cosot; } for (ii=0; ii < N-1; ii++) { *(tcoll+ii)-= dt; } *next_tcoll-= dt; free(t); bst_destroy(bst_tcoll); *ncoll= cnt_coll; if ( cnt_coll == maxcoll ) *err= -2; } // Approximate solution using leapfrog integration w/ exact forces void leapfrog_leapq(int N, struct array_w_index *xi,double *v,double dt){ int ii; #pragma omp parallel for 
schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii < N; ii++) (xi+ii)->val+= dt * *(v + (xi+ii)->idx); } void leapfrog_leappq(int N, struct array_w_index * xi, double *v,double dt_kick,double dt_drift, double *a){ int ii; #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) { *(v + (xi+ii)->idx)+= dt_kick * *(a + (xi+ii)->idx); (xi+ii)->val+= dt_drift * *(v + (xi+ii)->idx); } } void _nbody_force(int N, int sort_type, struct array_w_index * xi, double * x,double * m, double * a, double t, double totmass, double omega2, double (*ext_force)(int N,double *x, double t, double *a), double * cumulmass){ int ii; // argsort switch ( sort_type ) { case 0: argsort_quick_sort(xi,N); break; case 1: argsort_merge_sort(xi,N); break; case 2: argsort_tim_sort(xi,N); break; case 3: qsort(xi,N,sizeof(struct array_w_index),argsort_compare_function); break; case 4: parallel_sort(xi,N,sizeof(struct array_w_index),argsort_compare_function); break; } // Compute cumulative mass and acceleration for (ii=0; ii< N-1; ii++) *(cumulmass+ii+1)= *(cumulmass+ii) + *(m+(xi+ii)->idx); // Evaluate external force if ( ext_force && N > EXTERNAL_SWITCH ) { // Need to de-sort to pass x to ext_force's array calculation #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(x+(xi+ii)->idx)= (xi+ii)->val; ext_force(N,x,t,a); } else if ( ext_force ) // for < EXTERNAL_SWITCH, use sequential evaluation for (ii=0; ii< N; ii++) *(a + (xi+ii)->idx)= ext_force(1,&(xi+ii)->val,t,NULL); else #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a+ii)= 0.; if ( omega2 < 0 ) #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a + (xi+ii)->idx)+= totmass - 2 * *(cumulmass+ii) - *(m+(xi+ii)->idx); else #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a + (xi+ii)->idx)+= totmass - 2 * *(cumulmass+ii) \ - *(m+(xi+ii)->idx) - omega2 * (xi+ii)->val; } void _wendy_nbody_approx_onestep(int N, struct array_w_index * xi, double * x, double * v, double * m, double * a, double totmass, double dt, int nleap, double * t0, double omega2, double (*ext_force)(int,double *x, double t,double *a), int sort_type, int * err,double * time_elapsed, double * cumulmass){ int ii; double time_begin, time_end; time_begin= omp_get_wtime(); //drift half leapfrog_leapq(N,xi,v,dt/2.); //now drift full for a while for (ii=0; ii < (nleap-1); ii++){ //kick+drift _nbody_force(N,sort_type,xi,x,m,a,*t0,totmass,omega2,*ext_force,cumulmass); if ( ext_force ) *t0+= dt; leapfrog_leappq(N,xi,v,dt,dt,a); } //end with one last kick and drift _nbody_force(N,sort_type,xi,x,m,a,*t0,totmass,omega2,*ext_force,cumulmass); if ( ext_force ) *t0+= dt; leapfrog_leappq(N,xi,v,dt,dt/2.,a); //de-sort #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(x+(xi+ii)->idx)= (xi+ii)->val; time_end= omp_get_wtime(); *time_elapsed= time_end-time_begin; }
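// For reference: _solve_coll_quad() above finds when the relative separation
// of two adjacent sheets, dx(t) = c2*t^2 + c1*t + c0 (c0 the current
// separation, c1 the relative velocity, c2 half the relative acceleration),
// crosses zero. With mba = -c1/c2 the quadratic formula gives
// t = 0.5*( mba +/- sqrt( mba*mba - 4.*c0/c2 ) ); the code takes the larger
// ('+') root, since for a pair that has just collided the smaller root
// corresponds to the contact that was already resolved.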
omp-workshare-section.c
/*****************************************************************************
 Example 1.10 : omp-workshare-section.c

 Objective : Write an OpenMP Program to illustrate the Work-Sharing section.
             In this example, the OpenMP SECTION directive is used to assign
             different operations to each thread that executes a SECTION.

 Input     : a) Number of threads
             b) Size of matrices (numofrows and noofcols)

 Output    : Time taken by the parallel and serial computation

 Author    : RarchK
*********************************************************************************/

#include <stdio.h>
#include <sys/time.h>
#include <omp.h>
#include <math.h>
#include <stdlib.h>

/* Main Program */
int main(int argc, char **argv)
{
    int NoofCols, NoofRows, index, VectorSize, icol, vec_col, irow, MatrixFileStatus = 1, VectorFileStatus = 1;
    int matrowsize, matcolsize, vectorsize;
    float **Matrix, **Matrix_one, *Vector, *Result_Vector;
    FILE *fp1;
    struct timeval TimeValue_Start, TimeValue1_Start;
    struct timezone TimeZone_Start, TimeZone1_Start;
    struct timeval TimeValue_Final, TimeValue1_Final;
    struct timezone TimeZone_Final, TimeZone1_Final;
    long time_start, time_end;
    double time_overhead1, time_overhead2;
    int icol1, irow1;

    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Email : RarchK");
    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Objective : OpenMP Work-Sharing Construct .");
    printf("\n\t\t The OpenMP SECTION directive is used to assign different operations to each ");
    printf("\n\t\t thread that executes a SECTION.");
    printf("\n\t\t OpenMP Work-Sharing Construct : SECTION Directive . ");
    printf("\n\t\t..........................................................................\n");

    if (argc != 4) {
        printf("\t\t Too few arguments\n ");
        printf("\t\t Syntax : exec <NoOfRows> <NoOfColumns> <Vector-size> \n");
        exit(-1);
    }

    /* printf("\n\t\t Enter the No. of Rows and Columns of Matrix and Vector Size \n");
       scanf("%d%d%d",&matrowsize,&matcolsize,&vectorsize); */

    matrowsize = atoi(argv[1]);
    matcolsize = atoi(argv[2]);
    vectorsize = atoi(argv[3]);

    if (matcolsize != vectorsize) {
        printf("\n\n\t\t Matrix into vector multiplication not possible ");
        printf("\n\t\t Vector Size != Matrix Col size \n");
        exit(-1);
    }

    /* .......................................................................................
       The OpenMP SECTION directive is used to assign different operations
       to each thread that executes a SECTION.
       - One thread will do the file operation
       - Second thread will do the matrix vector computation
       ........................................................................................ */

    gettimeofday(&TimeValue_Start, &TimeZone_Start);

    omp_set_num_threads(2);
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            {
                printf("\n\n\t\t %d - Thread is doing section 1 ", omp_get_thread_num());
                if ((fp1 = fopen("./data/mdata.inp", "r")) == NULL) {
                    printf("\n\t\t Failed to open the file\n ");
                    MatrixFileStatus = 0;
                }
                /* if(MatrixFileStatus != 0) {
                     fscanf(fp1,"%d%d\n",&NoofRows,&NoofCols);
                     Matrix = (float **)malloc(NoofRows*sizeof(float *));
                     for(irow=0 ;irow<NoofRows; irow++){
                       Matrix[irow] = (float *)malloc(NoofCols*sizeof(float));
                       for(icol=0; icol<NoofCols; icol++)
                         fscanf(fp1,"%f",&Matrix[irow][icol]);
                     }
                     fclose(fp1);
                     free(Matrix);
                   } */
            }

            #pragma omp section
            {
                printf("\n\n\t\t %d - Thread is doing section 2 ", omp_get_thread_num());
                Matrix_one = (float **)malloc(matrowsize * sizeof(float *));
                Vector = (float *)malloc(vectorsize * sizeof(float));
                Result_Vector = (float *)malloc(matrowsize * sizeof(float));
                for (irow1 = 0; irow1 < matrowsize; irow1++) {
                    Matrix_one[irow1] = (float *)malloc(matcolsize * sizeof(float));
                    for (icol1 = 0; icol1 < matcolsize; icol1++) {
                        Matrix_one[irow1][icol1] = cos(irow1 * 1.0);
                        Vector[icol1] = cos(icol1 * 1.0);
                        Result_Vector[irow1] = 0.0;
                    }
                }
                for (icol1 = 0; icol1 < matrowsize; icol1++)
                    for (irow1 = 0; irow1 < matcolsize; irow1++) {
                        Result_Vector[icol1] = Result_Vector[icol1] + Vector[irow1] * Matrix_one[icol1][irow1];
                    }
                for (irow1 = 0; irow1 < matrowsize; irow1++)
                    free(Matrix_one[irow1]);
                free(Matrix_one);
                free(Vector);
                free(Result_Vector);
            }
        } /* End of sections */
    } /* End of parallel section */

    gettimeofday(&TimeValue_Final, &TimeZone_Final);
    time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
    time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
    time_overhead1 = (time_end - time_start) / 1000000.0;

    /* .................................................................................
       This section implements the above sections in a serial way
       ............................................................................... */

    /* Serial computation */
    gettimeofday(&TimeValue1_Start, &TimeZone1_Start);

    /* .......Read the Input file ......*/
    if ((fp1 = fopen("./data/mdata.inp", "r")) == NULL) {
        printf("\n\t\t Failed to open the file\n ");
        MatrixFileStatus = 0;
    }
    if (MatrixFileStatus != 0) {
        fscanf(fp1, "%d%d\n", &NoofRows, &NoofCols);
        /* ...Allocate memory and read Matrix from file .......*/
        Matrix = (float **)malloc(NoofRows * sizeof(float *));
        for (irow = 0; irow < NoofRows; irow++) {
            Matrix[irow] = (float *)malloc(NoofCols * sizeof(float));
            for (icol = 0; icol < NoofCols; icol++) {
                fscanf(fp1, "%f", &Matrix[irow][icol]);
            }
        }
        fclose(fp1);
        for (irow = 0; irow < NoofRows; irow++)
            free(Matrix[irow]);
        free(Matrix);
    }

    Matrix_one = (float **)malloc(matrowsize * sizeof(float *));
    Vector = (float *)malloc(vectorsize * sizeof(float));
    Result_Vector = (float *)malloc(matrowsize * sizeof(float));
    for (irow = 0; irow < matrowsize; irow++) {
        Matrix_one[irow] = (float *)malloc(matcolsize * sizeof(float));
        for (icol = 0; icol < matcolsize; icol++) {
            Matrix_one[irow][icol] = cos(irow * 1.0);
            Vector[icol] = cos(icol * 1.0);
            Result_Vector[irow] = 0.0;
        }
    }
    for (icol = 0; icol < matrowsize; icol++)
        for (irow = 0; irow < matcolsize; irow++) {
            Result_Vector[icol] = Result_Vector[icol] + Vector[irow] * Matrix_one[icol][irow];
        }
    for (irow = 0; irow < matrowsize; irow++)
        free(Matrix_one[irow]);
    free(Matrix_one);
    free(Vector);
    free(Result_Vector);

    gettimeofday(&TimeValue1_Final, &TimeZone1_Final);
    time_start = 0;
    time_end = 0;
    time_start = TimeValue1_Start.tv_sec * 1000000 + TimeValue1_Start.tv_usec;
    time_end = TimeValue1_Final.tv_sec * 1000000 + TimeValue1_Final.tv_usec;
    time_overhead2 = (time_end - time_start) / 1000000.0;

    printf("\n\n\t\t Time Taken (By Parallel Computation ) :%lf ", time_overhead1);
    printf("\n\t\t Time Taken (By Serial Computation ) :%lf \n ", time_overhead2);
    printf("\n\t\t..........................................................................\n");

    return 0;
}
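/* For reference, the bare skeleton of the work-sharing construct this example
   demonstrates: each SECTION block is executed exactly once, by whichever
   thread of the team picks it up, and there is an implied barrier at the end
   of the sections region (no nowait clause is used here). A minimal sketch: */
void sections_skeleton()
{
    #pragma omp parallel num_threads(2)
    {
        #pragma omp sections
        {
            #pragma omp section
            { /* task A, e.g. the file open above */ }
            #pragma omp section
            { /* task B, e.g. the matrix-vector product above */ }
        } /* implied barrier at the end of the sections region */
    }
}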
graph.c
/*!
 * \file
 *
 * \brief Various routines for dealing with sparse graphs
 *
 * \author George Karypis
 * \version\verbatim $Id: graph.c 13328 2012-12-31 14:57:40Z karypis $ \endverbatim
 */

#include <GKlib.h>

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocates memory for a graph and initializes it
    \returns the allocated graph. The various fields are set to NULL.
*/
/**************************************************************************/
gk_graph_t *gk_graph_Create()
{
  gk_graph_t *graph;

  graph = (gk_graph_t *)gk_malloc(sizeof(gk_graph_t), "gk_graph_Create: graph");

  gk_graph_Init(graph);

  return graph;
}

/*************************************************************************/
/*! Initializes the graph.
    \param graph is the graph to be initialized.
*/
/*************************************************************************/
void gk_graph_Init(gk_graph_t *graph)
{
  memset(graph, 0, sizeof(gk_graph_t));
  graph->nvtxs = -1;
}

/*************************************************************************/
/*! Frees all the memory allocated for a graph.
    \param graph is the graph to be freed.
*/
/*************************************************************************/
void gk_graph_Free(gk_graph_t **graph)
{
  if (*graph == NULL)
    return;
  gk_graph_FreeContents(*graph);
  gk_free((void **)graph, LTERM);
}

/*************************************************************************/
/*! Frees only the memory allocated for the graph's different fields and
    sets them to NULL.
    \param graph is the graph whose contents will be freed.
*/
/*************************************************************************/
void gk_graph_FreeContents(gk_graph_t *graph)
{
  gk_free((void *)&graph->xadj, &graph->adjncy,
          &graph->iadjwgt, &graph->fadjwgt,
          &graph->ivwgts, &graph->fvwgts,
          &graph->ivsizes, &graph->fvsizes,
          &graph->vlabels,
          LTERM);
}

/**************************************************************************/
/*! Reads a sparse graph from the supplied file
    \param filename is the file that stores the data.
    \param format is the graph format. The supported values are:
           GK_GRAPH_FMT_METIS.
    \param isfewgts is 1 if the edge-weights should be read as floats
    \param isfvwgts is 1 if the vertex-weights should be read as floats
    \param isfvsizes is 1 if the vertex-sizes should be read as floats
    \returns the graph that was read.
*/ /**************************************************************************/ gk_graph_t *gk_graph_Read(char *filename, int format, int isfewgts, int isfvwgts, int isfvsizes) { ssize_t i, k, l; size_t nfields, nvtxs, nedges, fmt, ncon, lnlen; int32_t ival; float fval; int readsizes=0, readwgts=0, readvals=0, numbering=0; char *line=NULL, *head, *tail, fmtstr[256]; FILE *fpin=NULL; gk_graph_t *graph=NULL; if (!gk_fexists(filename)) gk_errexit(SIGERR, "File %s does not exist!\n", filename); if (format == GK_GRAPH_FMT_METIS) { fpin = gk_fopen(filename, "r", "gk_graph_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); fmt = ncon = 0; nfields = sscanf(line, "%zu %zu %zu %zu", &nvtxs, &nedges, &fmt, &ncon); if (nfields < 2) gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n"); nedges *= 2; if (fmt > 111) gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt); sprintf(fmtstr, "%03zu", fmt%1000); readsizes = (fmtstr[0] == '1'); readwgts = (fmtstr[1] == '1'); readvals = (fmtstr[2] == '1'); numbering = 1; ncon = (ncon == 0 ? 1 : ncon); } else { gk_errexit(SIGERR, "Unrecognized format: %d\n", format); } graph = gk_graph_Create(); graph->nvtxs = nvtxs; graph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Read: xadj"); graph->adjncy = gk_i32malloc(nedges, "gk_graph_Read: adjncy"); if (readvals) { if (isfewgts) graph->fadjwgt = gk_fmalloc(nedges, "gk_graph_Read: fadjwgt"); else graph->iadjwgt = gk_i32malloc(nedges, "gk_graph_Read: iadjwgt"); } if (readsizes) { if (isfvsizes) graph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Read: fvsizes"); else graph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Read: ivsizes"); } if (readwgts) { if (isfvwgts) graph->fvwgts = gk_fmalloc(nvtxs*ncon, "gk_graph_Read: fvwgts"); else graph->ivwgts = gk_i32malloc(nvtxs*ncon, "gk_graph_Read: ivwgts"); } /*---------------------------------------------------------------------- * Read the sparse graph file *---------------------------------------------------------------------*/ numbering = (numbering ? 
- 1 : 0);

  for (graph->xadj[0]=0, k=0, i=0; i < (ssize_t)nvtxs; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file while reading row %zd\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
      if (isfvsizes) {
#ifdef __MSC__
        graph->fvsizes[i] = (float)strtod(head, &tail);
#else
        graph->fvsizes[i] = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
        if (graph->fvsizes[i] < 0)
          gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1);
      }
      else {
        graph->ivsizes[i] = strtol(head, &tail, 0);
        if (tail == head)
          gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
        if (graph->ivsizes[i] < 0)
          gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1);
      }
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l< (ssize_t)ncon; l++) {
        if (isfvwgts) {
#ifdef __MSC__
          graph->fvwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
          graph->fvwgts[i*ncon+l] = strtof(head, &tail);
#endif
          if (tail == head)
            gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights "
                       "for the %zu constraints.\n", i+1, ncon);
          if (graph->fvwgts[i*ncon+l] < 0)
            gk_errexit(SIGERR, "The weight for vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        }
        else {
          graph->ivwgts[i*ncon+l] = strtol(head, &tail, 0);
          if (tail == head)
            gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights "
                       "for the %zu constraints.\n", i+1, ncon);
          if (graph->ivwgts[i*ncon+l] < 0)
            gk_errexit(SIGERR, "The weight for vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        }
        head = tail;
      }
    }

    /* Read the rest of the row */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((graph->adjncy[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      if (readvals) {
        if (isfewgts) {
#ifdef __MSC__
          fval = (float)strtod(head, &tail);
#else
          fval = strtof(head, &tail);
#endif
          if (tail == head)
            gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k);
          graph->fadjwgt[k] = fval;
        }
        else {
          ival = strtol(head, &tail, 0);
          if (tail == head)
            gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k);
          graph->iadjwgt[k] = ival;
        }
        head = tail;
      }
      k++;
    }
    graph->xadj[i+1] = k;
  }

  if (k != (ssize_t)nedges)
    gk_errexit(SIGERR, "gk_graph_Read: Something wrong with the number of edges in "
               "the input file. nedges=%zd, Actualnedges=%zd.\n", nedges, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return graph;
}

/**************************************************************************/
/*! Writes a graph into a file.
    \param graph is the graph to be written.
    \param filename is the name of the output file.
    \param format is one of GK_GRAPH_FMT_METIS specifying the format of
           the output file.
*/
/**************************************************************************/
void gk_graph_Write(gk_graph_t *graph, char *filename, int format)
{
  ssize_t i, j;
  int hasvwgts, hasvsizes, hasewgts;
  FILE *fpout;

  if (format != GK_GRAPH_FMT_METIS)
    gk_errexit(SIGERR, "Unknown file format:
%d\n", format); if (filename) fpout = gk_fopen(filename, "w", "gk_graph_Write: fpout"); else fpout = stdout; hasewgts = (graph->iadjwgt || graph->fadjwgt); hasvwgts = (graph->ivwgts || graph->fvwgts); hasvsizes = (graph->ivsizes || graph->fvsizes); /* write the header line */ fprintf(fpout, "%d %zd", graph->nvtxs, graph->xadj[graph->nvtxs]/2); if (hasvwgts || hasvsizes || hasewgts) fprintf(fpout, " %d%d%d", hasvsizes, hasvwgts, hasewgts); fprintf(fpout, "\n"); for (i=0; i<graph->nvtxs; i++) { if (hasvsizes) { if (graph->ivsizes) fprintf(fpout, " %d", graph->ivsizes[i]); else fprintf(fpout, " %f", graph->fvsizes[i]); } if (hasvwgts) { if (graph->ivwgts) fprintf(fpout, " %d", graph->ivwgts[i]); else fprintf(fpout, " %f", graph->fvwgts[i]); } for (j=graph->xadj[i]; j<graph->xadj[i+1]; j++) { fprintf(fpout, " %d", graph->adjncy[j]+1); if (hasewgts) { if (graph->iadjwgt) fprintf(fpout, " %d", graph->iadjwgt[j]); else fprintf(fpout, " %f", graph->fadjwgt[j]); } } fprintf(fpout, "\n"); } if (filename) gk_fclose(fpout); } /*************************************************************************/ /*! Returns a copy of a graph. \param graph is the graph to be duplicated. \returns the newly created copy of the graph. */ /**************************************************************************/ gk_graph_t *gk_graph_Dup(gk_graph_t *graph) { gk_graph_t *ngraph; ngraph = gk_graph_Create(); ngraph->nvtxs = graph->nvtxs; /* copy the adjacency structure */ if (graph->xadj) ngraph->xadj = gk_zcopy(graph->nvtxs+1, graph->xadj, gk_zmalloc(graph->nvtxs+1, "gk_graph_Dup: xadj")); if (graph->ivwgts) ngraph->ivwgts = gk_i32copy(graph->nvtxs, graph->ivwgts, gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivwgts")); if (graph->ivsizes) ngraph->ivsizes = gk_i32copy(graph->nvtxs, graph->ivsizes, gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivsizes")); if (graph->vlabels) ngraph->vlabels = gk_i32copy(graph->nvtxs, graph->vlabels, gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivlabels")); if (graph->fvwgts) ngraph->fvwgts = gk_fcopy(graph->nvtxs, graph->fvwgts, gk_fmalloc(graph->nvtxs, "gk_graph_Dup: fvwgts")); if (graph->fvsizes) ngraph->fvsizes = gk_fcopy(graph->nvtxs, graph->fvsizes, gk_fmalloc(graph->nvtxs, "gk_graph_Dup: fvsizes")); if (graph->adjncy) ngraph->adjncy = gk_i32copy(graph->xadj[graph->nvtxs], graph->adjncy, gk_i32malloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: adjncy")); if (graph->iadjwgt) ngraph->iadjwgt = gk_i32copy(graph->xadj[graph->nvtxs], graph->iadjwgt, gk_i32malloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: iadjwgt")); if (graph->fadjwgt) ngraph->fadjwgt = gk_fcopy(graph->xadj[graph->nvtxs], graph->fadjwgt, gk_fmalloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: fadjwgt")); return ngraph; } /*************************************************************************/ /*! Returns a subgraph containing a set of consecutive vertices. \param graph is the original graph. \param vstart is the starting vertex. \param nvtxs is the number of vertices from vstart to extract. \returns the newly created subgraph. 
*/ /**************************************************************************/ gk_graph_t *gk_graph_ExtractSubgraph(gk_graph_t *graph, int vstart, int nvtxs) { ssize_t i; gk_graph_t *ngraph; if (vstart+nvtxs > graph->nvtxs) return NULL; ngraph = gk_graph_Create(); ngraph->nvtxs = nvtxs; /* copy the adjacency structure */ if (graph->xadj) ngraph->xadj = gk_zcopy(nvtxs+1, graph->xadj+vstart, gk_zmalloc(nvtxs+1, "gk_graph_ExtractSubgraph: xadj")); for (i=nvtxs; i>=0; i--) ngraph->xadj[i] -= ngraph->xadj[0]; ASSERT(ngraph->xadj[0] == 0); if (graph->ivwgts) ngraph->ivwgts = gk_i32copy(nvtxs, graph->ivwgts+vstart, gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivwgts")); if (graph->ivsizes) ngraph->ivsizes = gk_i32copy(nvtxs, graph->ivsizes+vstart, gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivsizes")); if (graph->vlabels) ngraph->vlabels = gk_i32copy(nvtxs, graph->vlabels+vstart, gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: vlabels")); if (graph->fvwgts) ngraph->fvwgts = gk_fcopy(nvtxs, graph->fvwgts+vstart, gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvwgts")); if (graph->fvsizes) ngraph->fvsizes = gk_fcopy(nvtxs, graph->fvsizes+vstart, gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvsizes")); ASSERT(ngraph->xadj[nvtxs] == graph->xadj[vstart+nvtxs]-graph->xadj[vstart]); if (graph->adjncy) ngraph->adjncy = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], graph->adjncy+graph->xadj[vstart], gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], "gk_graph_ExtractSubgraph: adjncy")); if (graph->iadjwgt) ngraph->iadjwgt = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], graph->iadjwgt+graph->xadj[vstart], gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], "gk_graph_ExtractSubgraph: iadjwgt")); if (graph->fadjwgt) ngraph->fadjwgt = gk_fcopy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], graph->fadjwgt+graph->xadj[vstart], gk_fmalloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart], "gk_graph_ExtractSubgraph: fadjwgt")); return ngraph; } /*************************************************************************/ /*! Returns a graph that has been reordered according to the permutation. \param[IN] graph is the graph to be re-ordered. \param[IN] perm is the new ordering of the graph's vertices \param[IN] iperm is the original ordering of the re-ordered graph's vertices \returns the newly created copy of the graph. \note Either perm or iperm can be NULL but not both. 
*/ /**************************************************************************/ gk_graph_t *gk_graph_Reorder(gk_graph_t *graph, int32_t *perm, int32_t *iperm) { ssize_t j, jj, *xadj; int i, u, v, nvtxs; int freeperm=0, freeiperm=0; int32_t *adjncy; gk_graph_t *ngraph; if (perm == NULL && iperm == NULL) return NULL; ngraph = gk_graph_Create(); ngraph->nvtxs = nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* allocate memory for the different structures that are present in graph */ if (graph->xadj) ngraph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Reorder: xadj"); if (graph->ivwgts) ngraph->ivwgts = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivwgts"); if (graph->ivsizes) ngraph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivsizes"); if (graph->vlabels) ngraph->vlabels = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivlabels"); if (graph->fvwgts) ngraph->fvwgts = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvwgts"); if (graph->fvsizes) ngraph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvsizes"); if (graph->adjncy) ngraph->adjncy = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: adjncy"); if (graph->iadjwgt) ngraph->iadjwgt = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: iadjwgt"); if (graph->fadjwgt) ngraph->fadjwgt = gk_fmalloc(graph->xadj[nvtxs], "gk_graph_Reorder: fadjwgt"); /* create perm/iperm if not provided */ if (perm == NULL) { freeperm = 1; perm = gk_i32malloc(nvtxs, "gk_graph_Reorder: perm"); for (i=0; i<nvtxs; i++) perm[iperm[i]] = i; } if (iperm == NULL) { freeiperm = 1; iperm = gk_i32malloc(nvtxs, "gk_graph_Reorder: iperm"); for (i=0; i<nvtxs; i++) iperm[perm[i]] = i; } /* fill-in the information of the re-ordered graph */ ngraph->xadj[0] = jj = 0; for (v=0; v<nvtxs; v++) { u = iperm[v]; for (j=xadj[u]; j<xadj[u+1]; j++, jj++) { ngraph->adjncy[jj] = perm[adjncy[j]]; if (graph->iadjwgt) ngraph->iadjwgt[jj] = graph->iadjwgt[j]; if (graph->fadjwgt) ngraph->fadjwgt[jj] = graph->fadjwgt[j]; } if (graph->ivwgts) ngraph->ivwgts[v] = graph->ivwgts[u]; if (graph->fvwgts) ngraph->fvwgts[v] = graph->fvwgts[u]; if (graph->ivsizes) ngraph->ivsizes[v] = graph->ivsizes[u]; if (graph->fvsizes) ngraph->fvsizes[v] = graph->fvsizes[u]; if (graph->vlabels) ngraph->vlabels[v] = graph->vlabels[u]; ngraph->xadj[v+1] = jj; } /* free memory */ if (freeperm) gk_free((void **)&perm, LTERM); if (freeiperm) gk_free((void **)&iperm, LTERM); return ngraph; } /*************************************************************************/ /*! This function finds the connected components in a graph. \param graph is the graph structure \param cptr is the ptr structure of the CSR representation of the components. The length of this vector must be graph->nvtxs+1. \param cind is the indices structure of the CSR representation of the components. The length of this vector must be graph->nvtxs. \returns the number of components that it found. \note The cptr and cind parameters can be NULL, in which case only the number of connected components is returned. 
*/ /*************************************************************************/ int gk_graph_FindComponents(gk_graph_t *graph, int32_t *cptr, int32_t *cind) { ssize_t i, j, k, nvtxs, first, last, ntodo, ncmps; ssize_t *xadj; int32_t *adjncy, *pos, *todo; int32_t mustfree_ccsr=0; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* Deal with NULL supplied cptr/cind vectors */ if (cptr == NULL) { cptr = gk_i32malloc(nvtxs+1, "gk_graph_FindComponents: cptr"); cind = gk_i32malloc(nvtxs, "gk_graph_FindComponents: cind"); mustfree_ccsr = 1; } /* The list of vertices that have not been touched yet. The valid entries are from [0..ntodo). */ todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: todo")); /* For a vertex that has not been visited, pos[i] is the position in the todo list where this vertex is stored. If a vertex has been visited, pos[i] = -1. */ pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: pos")); /* Find the connected components */ ncmps = -1; ntodo = nvtxs; /* no vertices have been visited yet */ first = last = 0; /* Point to the first and last vertices that have been touched but not explored. These vertices are stored in cind[first]...cind[last-1]. */ while (ntodo > 0) { if (first == last) { /* Find another starting vertex */ cptr[++ncmps] = first; /* Mark the end of the current CC */ ASSERT(pos[todo[0]] != -1); i = todo[0]; cind[last++] = i; pos[i] = -1; } i = cind[first++]; /* Get the first visited but unexplored vertex */ /* Remove i from the todo list and put the last item in the todo list at the position that i was, so that the todo list stays consecutive. The pos[] array is updated accordingly to keep track of the location of the vertices in the todo[] list. */ k = pos[i]; j = todo[k] = todo[--ntodo]; pos[j] = k; for (j=xadj[i]; j<xadj[i+1]; j++) { k = adjncy[j]; if (pos[k] != -1) { cind[last++] = k; pos[k] = -1; } } } cptr[++ncmps] = first; if (mustfree_ccsr) gk_free((void **)&cptr, &cind, LTERM); gk_free((void **)&pos, &todo, LTERM); return (int) ncmps; } /*************************************************************************/ /*! This function computes a permutation of the vertices based on a breadth-first-traversal. It can be used for re-ordering the graph to reduce its bandwidth for better cache locality. The algorithm used is a simplified version of the method used to find the connected components. \param[IN] graph is the graph structure \param[IN] v is the starting vertex of the BFS \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph. \param[OUT] iperm[i] stores the ID of the vertex that corresponds to the ith vertex in the re-ordered graph. \note The perm or iperm (but not both) can be NULL, at which point, the corresponding arrays are not returned. Though the program works fine when both are NULL, doing that is not smart. The returned arrays should be freed with gk_free(). */ /*************************************************************************/ void gk_graph_ComputeBFSOrdering(gk_graph_t *graph, int v, int32_t **r_perm, int32_t **r_iperm) { ssize_t j, *xadj; int i, k, nvtxs, first, last; int32_t *adjncy, *cot, *pos; if (graph->nvtxs <= 0) return; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* This array will function like pos + touched of the CC method */ pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: pos")); /* This array ([C]losed[O]pen[T]odo => cot) serves three purposes. 
Positions from [0...first) is the current iperm[] vector of the explored vertices; Positions from [first...last) is the OPEN list (i.e., visited vertices); Positions from [last...nvtxs) is the todo list. */ cot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: cot")); /* put v at the front of the todo list */ pos[0] = cot[0] = v; pos[v] = cot[v] = 0; /* Perform the BFS traversal, restarting from an untouched vertex whenever the frontier becomes empty */ first = last = 0; while (first < nvtxs) { if (first == last) { /* Find another starting vertex */ k = cot[last]; ASSERT(pos[k] != -1); pos[k] = -1; /* mark node as being visited */ last++; } i = cot[first++]; /* the ++ advances the explored vertices */ for (j=xadj[i]; j<xadj[i+1]; j++) { k = adjncy[j]; /* if a node has already been visited, its pos[] will be -1 */ if (pos[k] != -1) { /* pos[k] is the location within iperm of where k resides (it is in the 'todo' part); It is placed in that location cot[last] (end of OPEN list) that we are about to overwrite and update pos[cot[last]] to reflect that. */ cot[pos[k]] = cot[last]; /* put the head of the todo list to where k was in the todo list */ pos[cot[last]] = pos[k]; /* update perm to reflect the move */ cot[last++] = k; /* put node at the end of the OPEN list */ pos[k] = -1; /* mark node as being visited */ } } } /* time to decide what to return */ if (r_perm != NULL) { /* use the 'pos' array to build the perm array */ for (i=0; i<nvtxs; i++) pos[cot[i]] = i; *r_perm = pos; pos = NULL; } if (r_iperm != NULL) { *r_iperm = cot; cot = NULL; } /* cleanup memory */ gk_free((void **)&pos, &cot, LTERM); } /*************************************************************************/ /*! This function computes a permutation of the vertices based on a best-first-traversal. It can be used for re-ordering the graph to reduce its bandwidth for better cache locality. \param[IN] graph is the graph structure. \param[IN] v is the starting vertex of the best-first traversal. \param[IN] type indicates the criteria to use to measure the 'bestness' of a vertex. \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph. \param[OUT] iperm[i] stores the ID of the vertex that corresponds to the ith vertex in the re-ordered graph. \note The perm or iperm (but not both) can be NULL, at which point, the corresponding arrays are not returned. Though the program works fine when both are NULL, doing that is not smart. The returned arrays should be freed with gk_free(). */ /*************************************************************************/ void gk_graph_ComputeBestFOrdering0(gk_graph_t *graph, int v, int type, int32_t **r_perm, int32_t **r_iperm) { ssize_t j, jj, *xadj; int i, k, u, nvtxs; int32_t *adjncy, *perm, *degrees, *minIDs, *open; gk_i32pq_t *queue; if (graph->nvtxs <= 0) return; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* the degree of the vertices in the closed list */ degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees"); /* the minimum vertex ID of an open vertex to the closed list */ minIDs = gk_i32smalloc(nvtxs, nvtxs+1, "gk_graph_ComputeBestFOrdering: minIDs"); /* the open list */ open = gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: open"); /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. 
*/ perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm"); /* create the queue and put everything in it */ queue = gk_i32pqCreate(nvtxs); for (i=0; i<nvtxs; i++) gk_i32pqInsert(queue, i, 0); gk_i32pqUpdate(queue, v, 1); open[0] = v; /* start processing the nodes */ for (i=0; i<nvtxs; i++) { if ((v = gk_i32pqGetTop(queue)) == -1) gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i); if (perm[v] != -1) gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v); perm[v] = i; for (j=xadj[v]; j<xadj[v+1]; j++) { u = adjncy[j]; if (perm[u] == -1) { degrees[u]++; minIDs[u] = (i < minIDs[u] ? i : minIDs[u]); switch (type) { case 1: /* DFS */ gk_i32pqUpdate(queue, u, 1); break; case 2: /* Max in closed degree */ gk_i32pqUpdate(queue, u, degrees[u]); break; case 3: /* Sum of orders in closed list */ for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) { if (perm[adjncy[jj]] != -1) k += perm[adjncy[jj]]; } gk_i32pqUpdate(queue, u, k); break; case 4: /* Sum of order-differences (w.r.t. current number) in closed list (updated once in a while) */ for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) { if (perm[adjncy[jj]] != -1) k += (i-perm[adjncy[jj]]); } gk_i32pqUpdate(queue, u, k); break; default: ; } } } } /* time to decide what to return; build iperm first, since building it requires perm */ if (r_iperm != NULL) { /* use the 'degrees' array to build the iperm array */ for (i=0; i<nvtxs; i++) degrees[perm[i]] = i; *r_iperm = degrees; degrees = NULL; } if (r_perm != NULL) { *r_perm = perm; perm = NULL; } /* cleanup memory */ gk_i32pqDestroy(queue); gk_free((void **)&perm, &degrees, &minIDs, &open, LTERM); } /*************************************************************************/ /*! This function computes a permutation of the vertices based on a best-first-traversal. It can be used for re-ordering the graph to reduce its bandwidth for better cache locality. \param[IN] graph is the graph structure. \param[IN] v is the starting vertex of the best-first traversal. \param[IN] type indicates the criteria to use to measure the 'bestness' of a vertex. \param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph. \param[OUT] iperm[i] stores the ID of the vertex that corresponds to the ith vertex in the re-ordered graph. \note The perm or iperm (but not both) can be NULL, at which point, the corresponding arrays are not returned. Though the program works fine when both are NULL, doing that is not smart. The returned arrays should be freed with gk_free(). */ /*************************************************************************/ void gk_graph_ComputeBestFOrdering(gk_graph_t *graph, int v, int type, int32_t **r_perm, int32_t **r_iperm) { ssize_t j, *xadj; int i, u, nvtxs, nopen, ntodo; int32_t *adjncy, *perm, *degrees, *wdegrees, *sod, *level, *ot, *pos; gk_i32pq_t *queue; if (graph->nvtxs <= 0) return; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; /* the degree of the vertices in the closed list */ degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees"); /* the weighted degree of the vertices in the closed list for type==3 */ wdegrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: wdegrees"); /* the sum of differences for type==4 */ sod = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: sod"); /* the encountering level of a vertex type==5 */ level = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: level"); /* The open+todo list of vertices. The vertices from [0..nopen) are the open vertices. The vertices from [nopen..ntodo) are the todo vertices. 
*/ ot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: ot")); /* For a vertex that has not been explored, pos[i] is the position in the ot list. */ pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: pos")); /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. */ perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm"); /* create the queue and put the starting vertex in it */ queue = gk_i32pqCreate(nvtxs); gk_i32pqInsert(queue, v, 1); /* put v at the front of the open list */ pos[0] = ot[0] = v; pos[v] = ot[v] = 0; nopen = 1; ntodo = nvtxs; /* start processing the nodes */ for (i=0; i<nvtxs; i++) { if (nopen == 0) { /* deal with non-connected graphs */ gk_i32pqInsert(queue, ot[0], 1); nopen++; } if ((v = gk_i32pqGetTop(queue)) == -1) gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i); if (perm[v] != -1) gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v); perm[v] = i; if (ot[pos[v]] != v) gk_errexit(SIGERR, "Something went wrong [ot[pos[%d]]!=%d.\n", v, v); if (pos[v] >= nopen) gk_errexit(SIGERR, "The position of v is not in open list. pos[%d]=%d is >=%d.\n", v, pos[v], nopen); /* remove v from the open list and re-arrange the todo part of the list */ ot[pos[v]] = ot[nopen-1]; pos[ot[nopen-1]] = pos[v]; if (ntodo > nopen) { ot[nopen-1] = ot[ntodo-1]; pos[ot[ntodo-1]] = nopen-1; } nopen--; ntodo--; for (j=xadj[v]; j<xadj[v+1]; j++) { u = adjncy[j]; if (perm[u] == -1) { /* if u is not yet in the open list, append it to the end of the open list */ if (degrees[u] == 0) { ot[pos[u]] = ot[nopen]; pos[ot[nopen]] = pos[u]; ot[nopen] = u; pos[u] = nopen; nopen++; level[u] = level[v]+1; gk_i32pqInsert(queue, u, 0); } /* update the in-closed degree */ degrees[u]++; /* update the queues based on the type */ switch (type) { case 1: /* DFS */ gk_i32pqUpdate(queue, u, 1000*(i+1)+degrees[u]); break; case 2: /* Max in closed degree */ gk_i32pqUpdate(queue, u, degrees[u]); break; case 3: /* Sum of orders in closed list */ wdegrees[u] += i; gk_i32pqUpdate(queue, u, wdegrees[u]); break; case 4: /* Sum of order-differences */ /* this is handled at the end of the loop */ ; break; case 5: /* BFS with in degree priority */ gk_i32pqUpdate(queue, u, -(1000*level[u] - degrees[u])); break; case 6: /* Hybrid of 1+2 */ gk_i32pqUpdate(queue, u, (i+1)*degrees[u]); break; default: ; } } } if (type == 4) { /* update all the vertices in the open list */ for (j=0; j<nopen; j++) { u = ot[j]; if (perm[u] != -1) gk_errexit(SIGERR, "For i=%d, the open list contains a closed vertex: ot[%zd]=%d, perm[%d]=%d.\n", i, j, u, u, perm[u]); sod[u] += degrees[u]; if (i<1000 || i%25==0) gk_i32pqUpdate(queue, u, sod[u]); } } /* for (j=0; j<ntodo; j++) { if (pos[ot[j]] != j) gk_errexit(SIGERR, "pos[ot[%zd]] != %zd.\n", j, j); } */ } /* time to decide what to return; build iperm first, since building it requires perm */ if (r_iperm != NULL) { /* use the 'degrees' array to build the iperm array */ for (i=0; i<nvtxs; i++) degrees[perm[i]] = i; *r_iperm = degrees; degrees = NULL; } if (r_perm != NULL) { *r_perm = perm; perm = NULL; } /* cleanup memory */ gk_i32pqDestroy(queue); gk_free((void **)&perm, &degrees, &wdegrees, &sod, &ot, &pos, &level, LTERM); } /*************************************************************************/ /*! This function computes the single-source shortest path lengths from the root node to all the other nodes in the graph. If the graph is not connected, then the shortest path length to the vertices in the other components is -1. 
\param[IN] graph is the graph structure. \param[IN] v is the root of the single-source shortest path computations. \param[OUT] sps[i] stores the length of the shortest path from v to vertex i. If no such path exists, then it is -1. Note that the returned array will be either an array of int32_t or an array of floats. The specific type is determined by the existence of non-NULL iadjwgt and fadjwgt arrays. If both of these arrays exist, then priority is given to iadjwgt. \note The returned array should be freed with gk_free(). */ /*************************************************************************/ void gk_graph_SingleSourceShortestPaths(gk_graph_t *graph, int v, void **r_sps) { ssize_t *xadj; int i, u, nvtxs; int32_t *adjncy, *inqueue; if (graph->nvtxs <= 0) return; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; inqueue = gk_i32smalloc(nvtxs, 0, "gk_graph_SingleSourceShortestPaths: inqueue"); /* determine if you will be computing using int32_t or float and proceed from there */ if (graph->iadjwgt != NULL) { gk_i32pq_t *queue; int32_t *adjwgt; int32_t *sps; adjwgt = graph->iadjwgt; queue = gk_i32pqCreate(nvtxs); gk_i32pqInsert(queue, v, 0); inqueue[v] = 1; sps = gk_i32smalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps"); sps[v] = 0; /* start processing the nodes */ while ((v = gk_i32pqGetTop(queue)) != -1) { inqueue[v] = 2; /* relax the adjacent edges */ for (i=xadj[v]; i<xadj[v+1]; i++) { u = adjncy[i]; if (inqueue[u] == 2) continue; if (sps[u] < 0 || sps[v]+adjwgt[i] < sps[u]) { sps[u] = sps[v]+adjwgt[i]; if (inqueue[u]) gk_i32pqUpdate(queue, u, -sps[u]); else { gk_i32pqInsert(queue, u, -sps[u]); inqueue[u] = 1; } } } } *r_sps = (void *)sps; gk_i32pqDestroy(queue); } else { gk_fpq_t *queue; float *adjwgt; float *sps; adjwgt = graph->fadjwgt; queue = gk_fpqCreate(nvtxs); gk_fpqInsert(queue, v, 0); inqueue[v] = 1; sps = gk_fsmalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps"); sps[v] = 0; /* start processing the nodes */ while ((v = gk_fpqGetTop(queue)) != -1) { inqueue[v] = 2; /* relax the adjacent edges */ for (i=xadj[v]; i<xadj[v+1]; i++) { u = adjncy[i]; if (inqueue[u] == 2) continue; if (sps[u] < 0 || sps[v]+adjwgt[i] < sps[u]) { sps[u] = sps[v]+adjwgt[i]; if (inqueue[u]) gk_fpqUpdate(queue, u, -sps[u]); else { gk_fpqInsert(queue, u, -sps[u]); inqueue[u] = 1; } } } } *r_sps = (void *)sps; gk_fpqDestroy(queue); } gk_free((void **)&inqueue, LTERM); } #ifdef XXX /*************************************************************************/ /*! 
Sorts the adjacency lists in increasing vertex order \param graph the graph itself, */ /**************************************************************************/ void gk_graph_SortAdjacencies(gk_graph_t *graph) { int n, nn=0; ssize_t *ptr; int *ind; float *val; switch (what) { case GK_CSR_ROW: if (!graph->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exist.\n"); n = graph->nrows; ptr = graph->rowptr; ind = graph->rowind; val = graph->rowval; break; case GK_CSR_COL: if (!graph->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exist.\n"); n = graph->ncols; ptr = graph->colptr; ind = graph->colind; val = graph->colval; break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } #pragma omp parallel if (n > 100) { ssize_t i, j, k; gk_ikv_t *cand; float *tval; #pragma omp single for (i=0; i<n; i++) nn = gk_max(nn, ptr[i+1]-ptr[i]); cand = gk_ikvmalloc(nn, "gk_graph_SortIndices: cand"); tval = gk_fmalloc(nn, "gk_graph_SortIndices: tval"); #pragma omp for schedule(static) for (i=0; i<n; i++) { for (k=0, j=ptr[i]; j<ptr[i+1]; j++) { if (j > ptr[i] && ind[j] < ind[j-1]) k = 1; /* an inversion */ cand[j-ptr[i]].val = j-ptr[i]; cand[j-ptr[i]].key = ind[j]; tval[j-ptr[i]] = val[j]; } if (k) { gk_ikvsorti(ptr[i+1]-ptr[i], cand); for (j=ptr[i]; j<ptr[i+1]; j++) { ind[j] = cand[j-ptr[i]].key; val[j] = tval[cand[j-ptr[i]].val]; } } } gk_free((void **)&cand, &tval, LTERM); } } /*************************************************************************/ /*! Returns a submatrix containing a certain set of rows. \param graph is the original matrix. \param nrows is the number of rows to extract. \param rind is the set of row numbers to extract. \returns the row structure of the newly created submatrix. */ /**************************************************************************/ gk_graph_t *gk_graph_ExtractRows(gk_graph_t *graph, int nrows, int *rind) { ssize_t i, ii, j, nnz; gk_graph_t *ngraph; ngraph = gk_graph_Create(); ngraph->nrows = nrows; ngraph->ncols = graph->ncols; for (nnz=0, i=0; i<nrows; i++) nnz += graph->rowptr[rind[i]+1]-graph->rowptr[rind[i]]; ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr"); ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind"); ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval"); ngraph->rowptr[0] = 0; for (nnz=0, j=0, ii=0; ii<nrows; ii++) { i = rind[ii]; gk_icopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowind+graph->rowptr[i], ngraph->rowind+nnz); gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowval+graph->rowptr[i], ngraph->rowval+nnz); nnz += graph->rowptr[i+1]-graph->rowptr[i]; ngraph->rowptr[++j] = nnz; } ASSERT(j == ngraph->nrows); return ngraph; } /*************************************************************************/ /*! Returns a submatrix corresponding to a specified partitioning of rows. \param graph is the original matrix. \param part is the partitioning vector of the rows. \param pid is the partition ID that will be extracted. \returns the row structure of the newly created submatrix. 
*/ /**************************************************************************/ gk_graph_t *gk_graph_ExtractPartition(gk_graph_t *graph, int *part, int pid) { ssize_t i, j, nnz; gk_graph_t *ngraph; ngraph = gk_graph_Create(); ngraph->nrows = 0; ngraph->ncols = graph->ncols; for (nnz=0, i=0; i<graph->nrows; i++) { if (part[i] == pid) { ngraph->nrows++; nnz += graph->rowptr[i+1]-graph->rowptr[i]; } } ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr"); ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind"); ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval"); ngraph->rowptr[0] = 0; for (nnz=0, j=0, i=0; i<graph->nrows; i++) { if (part[i] == pid) { gk_icopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowind+graph->rowptr[i], ngraph->rowind+nnz); gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowval+graph->rowptr[i], ngraph->rowval+nnz); nnz += graph->rowptr[i+1]-graph->rowptr[i]; ngraph->rowptr[++j] = nnz; } } ASSERT(j == ngraph->nrows); return ngraph; } /*************************************************************************/ /*! Splits the matrix into multiple sub-matrices based on the provided color array. \param graph is the original matrix. \param color is an array of size equal to the number of non-zeros in the matrix (row-wise structure). The matrix is split into as many parts as the number of colors. For meaningful results, the colors should be numbered consecutively starting from 0. \returns an array of matrices for each supplied color number. */ /**************************************************************************/ gk_graph_t **gk_graph_Split(gk_graph_t *graph, int *color) { ssize_t i, j; int nrows, ncolors; ssize_t *rowptr; int *rowind; float *rowval; gk_graph_t **sgraphs; nrows = graph->nrows; rowptr = graph->rowptr; rowind = graph->rowind; rowval = graph->rowval; ncolors = gk_imax(rowptr[nrows], color)+1; sgraphs = (gk_graph_t **)gk_malloc(sizeof(gk_graph_t *)*ncolors, "gk_graph_Split: sgraphs"); for (i=0; i<ncolors; i++) { sgraphs[i] = gk_graph_Create(); sgraphs[i]->nrows = graph->nrows; sgraphs[i]->ncols = graph->ncols; sgraphs[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_graph_Split: sgraphs[i]->rowptr"); } for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) sgraphs[color[j]]->rowptr[i]++; } for (i=0; i<ncolors; i++) MAKECSR(j, nrows, sgraphs[i]->rowptr); for (i=0; i<ncolors; i++) { sgraphs[i]->rowind = gk_imalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowind"); sgraphs[i]->rowval = gk_fmalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowval"); } for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { sgraphs[color[j]]->rowind[sgraphs[color[j]]->rowptr[i]] = rowind[j]; sgraphs[color[j]]->rowval[sgraphs[color[j]]->rowptr[i]] = rowval[j]; sgraphs[color[j]]->rowptr[i]++; } } for (i=0; i<ncolors; i++) SHIFTCSR(j, nrows, sgraphs[i]->rowptr); return sgraphs; } /*************************************************************************/ /*! Prunes certain rows/columns of the matrix. The pruning is based on the row structure of the matrix: rows/columns are removed, but the numbering of the remaining rows/columns is not affected. 
\param graph the matrix to be pruned, \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL) of the matrix will be pruned, \param minf is the minimum number of rows (columns) that a column (row) must be present in order to be kept, \param maxf is the maximum number of rows (columns) that a column (row) must be present at in order to be kept. \returns the pruned matrix consisting only of its row-based structure. The input matrix is not modified. */ /**************************************************************************/ gk_graph_t *gk_graph_Prune(gk_graph_t *graph, int what, int minf, int maxf) { ssize_t i, j, nnz; int nrows, ncols; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind, *collen; float *rowval, *nrowval; gk_graph_t *ngraph; ngraph = gk_graph_Create(); nrows = ngraph->nrows = graph->nrows; ncols = ngraph->ncols = graph->ncols; rowptr = graph->rowptr; rowind = graph->rowind; rowval = graph->rowval; nrowptr = ngraph->rowptr = gk_zmalloc(nrows+1, "gk_graph_Prune: nrowptr"); nrowind = ngraph->rowind = gk_imalloc(rowptr[nrows], "gk_graph_Prune: nrowind"); nrowval = ngraph->rowval = gk_fmalloc(rowptr[nrows], "gk_graph_Prune: nrowval"); switch (what) { case GK_CSR_COL: collen = gk_ismalloc(ncols, 0, "gk_graph_Prune: collen"); for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { ASSERT(rowind[j] < ncols); collen[rowind[j]]++; } } for (i=0; i<ncols; i++) collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0); nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (collen[rowind[j]]) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } gk_free((void **)&collen, LTERM); break; case GK_CSR_ROW: nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) { for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; } } nrowptr[i+1] = nnz; } break; default: gk_graph_Free(&ngraph); gk_errexit(SIGERR, "Unknown pruning type of %d\n", what); return NULL; } return ngraph; } /*************************************************************************/ /*! Normalizes the rows/columns of the matrix to be unit length. \param graph the matrix itself, \param what indicates what will be normalized and is obtained by specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL. 
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void gk_graph_Normalize(gk_graph_t *graph, int what, int norm) { ssize_t i, j; int n; ssize_t *ptr; float *val, sum; if (what&GK_CSR_ROW && graph->rowval) { n = graph->nrows; ptr = graph->rowptr; val = graph->rowval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){ if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } if (what&GK_CSR_COL && graph->colval) { n = graph->ncols; ptr = graph->colptr; val = graph->colval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } } #endif
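/*
 * Editor's note: a minimal usage sketch of the routines above, not part of
 * GKlib itself. It assumes an already-populated gk_graph_t (e.g., obtained
 * via gk_graph_Read) and uses only signatures defined in this file; the
 * guard keeps it out of normal builds.
 */
#ifdef GK_GRAPH_USAGE_SKETCH
static void gk_graph_demo_reorder(gk_graph_t *graph, char *outfile)
{
  int32_t *perm=NULL, *iperm=NULL;
  gk_graph_t *ngraph;

  /* BFS ordering from vertex 0; on return, perm[i] is the new ID of old
     vertex i and iperm[i] is the old ID of the i-th re-ordered vertex */
  gk_graph_ComputeBFSOrdering(graph, 0, &perm, &iperm);

  /* permute the adjacency structure to reduce bandwidth and save it */
  ngraph = gk_graph_Reorder(graph, perm, iperm);
  gk_graph_Write(ngraph, outfile, GK_GRAPH_FMT_METIS);

  gk_graph_Free(&ngraph);
  gk_free((void **)&perm, &iperm, LTERM);
}
#endif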
3d25pt_var.c
/* * 3D 25-point stencil (radius 4) with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx=108, Ny=108, Nz=108, Nt=100; /* defaults (100^3 interior plus the 8-point halo, 100 time steps) when sizes are not given on the command line */ if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize both time buffers over the whole grid, including the halo, so the stencil never reads uninitialized values // srand(42); for (m = 0; m < 2; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[m][i][j][k] = 1.0 * (rand() % BASE); } } } } for (m=0; m<13; m++) { for (i=0; i<Nz; i++) { for (j=0; j<Ny; j++) { for (k=0; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Additions: 24 && Multiplications: 13 per grid point #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) 
+ coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
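/*
 * Editor's sketch (hypothetical driver, not part of the benchmark): using
 * timeval_subtract on its own. The carry logic above normalizes tv_usec
 * before the subtraction, so folding the result into seconds as a double,
 * exactly as the timing loop does, is safe. Guarded out of normal builds.
 */
#ifdef TIMEVAL_SUBTRACT_DEMO
#include <unistd.h>
int timing_demo(void)
{
  struct timeval t0, t1, d;
  gettimeofday(&t0, 0);
  usleep(250000);                         /* stand-in for ~0.25 s of work */
  gettimeofday(&t1, 0);
  (void) timeval_subtract(&d, &t1, &t0);  /* d = t1 - t0 */
  printf("elapsed: %f s\n", (double) (d.tv_sec + d.tv_usec * 1.0e-6));
  return 0;
}
#endif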
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declarations. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declarations. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declarations. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) 
return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; Quantum *p; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; 
unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length) { ssize_t count; count=ReadBlob(image,length,(unsigned char *) p); if ((count == (ssize_t) length) && (image->endian != 
MSBEndian)) { char *q; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return(count); } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; const unsigned char *p; Quantum *q; ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
8 : image->depth),row_size,pixels);  /* 1-bit rows decode 8 packed pixels per byte */
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  /* Undo horizontal delta prediction: each byte is a delta from its left neighbor. */
  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      *(p+1)+=*p;
      p++;
    }
    p++;
    remaining-=row_size;
  }
}

static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  /* Undo delta prediction on big-endian 16-bit samples, carrying the low-byte overflow. */
  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;
  }
}

static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /*
    32-bit rows store the 4 bytes of each sample in planar order; undo the
    byte deltas, then interleave the planes into output_pixels.
  */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}

static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *) compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *) pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char
*) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Skip a mask that is not a user-supplied layer mask, a mask that is
        disabled, or a mask whose flags have unsupported values.
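        Per Adobe's PSD specification, mask flag bit 0x01 means the mask
        position is recorded relative to the layer and bit 0x02 means the
        mask is disabled; flag values above 2 set bits this coder does not
        handle.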
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. 
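    The psd:layer.x, psd:layer.y and psd:layer.opacity artifacts record the
    layer's page offset and opacity; the writer consults psd:layer.opacity
    when the image is written back out as PSD.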
*/ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. 
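    As a heuristic, a grayscale file with more than one channel, an RGB file
    with more than three, or a CMYK file with more than four very likely
    carries the extra channel as alpha.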
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) (*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->alpha_trait=BlendPixelTrait; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; ssize_t i; ssize_t count, index, j, number_layers; size=GetLayerInfoSize(psd_info,image); if (size == 0) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { 
layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
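              The name is a Pascal string: one length byte followed by the
              characters, padded so the total (length byte included) is a
              multiple of 4.  For example, the 5-character name "Layer"
              occupies 6 bytes plus 2 bytes of padding.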
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); 
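  /*
    If the "PSD" coder policy denied read rights above, we reported success
    without touching the layer data; otherwise read the layers now.
  */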
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t image_list_length; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
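    The 26-byte header is, in big-endian order: the 4-byte signature "8BPS",
    a 2-byte version (1 for PSD, 2 for PSB), 6 reserved bytes, a 2-byte
    channel count, 4-byte row and column counts, a 2-byte depth, and a
    2-byte color mode.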
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. 
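            For duotone files we simply skip over this data and decode the
            image as grayscale.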
*/ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
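    Photoshop only writes this flattened composite when saving with
    "Maximize Compatibility" enabled, so it may be absent; if it is, the
    layers read above are merged into a composite further below.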
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); image_list_length=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } image_list_length=GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); if (merged == (Image *) NULL) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; ssize_t i, j; unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
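          PackBits: a header byte n in [0,127] is followed by n+1 literal
          bytes, a header byte in [129,255] repeats the next byte 257-n
          times, and 128 serves as the end-of-data marker here.  For
          example, the run AA AA AA encodes as the pair FE AA, while the
          literals AA BB encode as 01 AA BB.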
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; const Quantum *p; ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != 
UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { 
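      /*
        Resource id 0x040f is the ICC profile; excise its 12-byte block
        header together with the padded profile data from the 8BIM blob.
      */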
ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image 
*image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
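/*
  Illustrative sketch (not part of the coder): WriteResolutionResourceBlock
  above stores the resolution in resource 0x03ED as a big-endian 16.16
  fixed-point value (integer part in the high 16 bits), with a unit flag of
  1 for pixels/inch or 2 for pixels/cm.  The 72.0 dpi below is a
  hypothetical example value.
*/
#include <stdio.h>

int main(void)
{
  double dpi = 72.0;  /* hypothetical resolution in pixels per inch */
  unsigned int fixed = (unsigned int) (65536.0*dpi+0.5);  /* 16.16 fixed point */
  (void) printf("%.1f dpi -> 0x%08X\n",dpi,fixed);  /* prints 0x00480000 */
  return(0);
}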
ejercicio7.c
/* To compile use (-lrt: real time library):
	gcc -O2 SumaVectores.c -o SumaVectores -lrt
   To run use: SumaVectoresC length */

#include <stdlib.h>   // library with the functions atoi(), malloc() and free()
#include <stdio.h>    // library that provides printf()
#include <omp.h>      // library that provides the OpenMP parallel routines

//#define PRINTF_ALL   // comment out to remove the printf that prints
                       // every component

// Only one of the three VECTOR_ constants may be defined (only one of the
// three defines below may be uncommented):
//#define VECTOR_LOCAL   // uncomment so the vectors are local variables
                         // (if the stack size is exceeded, a
                         // "Segmentation fault" error is raised)
#define VECTOR_GLOBAL    // uncomment so the vectors are global variables
                         // (their length is not limited by the program's
                         // stack size)
//#define VECTOR_DYNAMIC // uncomment so the vectors are dynamic variables
                         // (memory reusable during execution)

#ifdef VECTOR_GLOBAL
#define MAX 33554432 //=2^25
double v1[MAX], v2[MAX], v3[MAX];
#endif

int main(int argc, char** argv)
{
  int i;
  double cgt1, cgt2;
  double ncgt; // for the execution time

  // Read the input argument (number of vector components)
  if (argc < 2) {
    printf("Missing number of vector components\n");
    exit(-1);
  }
  unsigned int N = atoi(argv[1]); // maximum N = 2^32-1 = 4294967295
                                  // (sizeof(unsigned int) = 4 B)
#ifdef VECTOR_LOCAL
  double v1[N], v2[N], v3[N]; // variable-length local arrays, sized at run
                              // time, available in C since the C99 revision
#endif
#ifdef VECTOR_GLOBAL
  if (N > MAX)
    N = MAX;
#endif
#ifdef VECTOR_DYNAMIC
  double *v1, *v2, *v3;
  v1 = (double*) malloc(N*sizeof(double)); // malloc takes the size in bytes
  v2 = (double*) malloc(N*sizeof(double)); // if there is not enough memory,
                                           // malloc returns NULL
  v3 = (double*) malloc(N*sizeof(double));
  if ((v1 == NULL) || (v2 == NULL) || (v3 == NULL)) {
    printf("Error allocating space for the vectors\n");
    exit(-2);
  }
#endif

  // Initialize the vectors
  #pragma omp parallel for
  for (i = 0; i < N; i++) {
    v1[i] = N*0.1 + i*0.1;
    v2[i] = N*0.1 - i*0.1; // the values depend on N
  }

  cgt1 = omp_get_wtime();
  // Compute the vector sum
  #pragma omp parallel for
  for (i = 0; i < N; i++)
    v3[i] = v1[i] + v2[i];
  cgt2 = omp_get_wtime();
  ncgt = (cgt2 - cgt1)/*/(1.e+9)*/; // omp_get_wtime() already returns seconds

  // Print the result of the sum and the execution time
#ifdef PRINTF_ALL
  printf("Time(sec.):%11.9f\t / Vector size:%u\n", ncgt, N);
  for (i = 0; i < N; i++)
    printf("/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
           i, i, i, v1[i], v2[i], v3[i]);
#else
  printf("Time(sec.):%11.9f\t / Vector size:%u\t"
         "/ V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) "
         "// V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
         ncgt, N, v1[0], v2[0], v3[0], N-1, N-1, N-1,
         v1[N-1], v2[N-1], v3[N-1]);
#endif

#ifdef VECTOR_DYNAMIC
  free(v1); // release the space reserved for v1
  free(v2); // release the space reserved for v2
  free(v3); // release the space reserved for v3
#endif
  return 0;
}
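/*
  Optional sanity check (a sketch, not part of the original exercise): count
  mismatches between v3 and a recomputed sum using an OpenMP reduction.  The
  fragment assumes the same v1, v2, v3 and N and would sit just before the
  VECTOR_DYNAMIC cleanup.  Exact equality is expected here because the same
  IEEE addition is recomputed on the same operands.
*/
  long errors = 0;
  #pragma omp parallel for reduction(+:errors)
  for (int j = 0; j < (int) N; j++)
    if (v3[j] != v1[j] + v2[j])
      errors++;
  printf("Mismatches: %ld\n", errors);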
convolution_sgemm_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; // permute Mat tmp; if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 16u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + size % 4, 16u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 8, opt.workspace_allocator); { int nn_size = size / 8; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, 
[%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size * 8; } } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = top_blob.channel(p + 1); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p * 4 : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "mov v28.16b, %10.16b \n" "mov v29.16b, %10.16b \n" "mov v30.16b, %10.16b \n" "mov v31.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 
\n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 float16x8_t _sum0 = _bias0; for (int q = 0; q < nn; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = 
vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; tmpptr += 8; } vst1_f16(outptr0, vget_low_f16(_sum0)); vst1_f16(outptr1, vget_high_f16(_sum0)); outptr0 += 4; outptr1 += 4; } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p * 4 : zeros; float16x4_t _bias0 = vld1_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "mov v28.16b, %8.16b \n" "mov v29.16b, %8.16b \n" "mov v30.16b, %8.16b \n" "mov v31.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" 
"subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch * maxk; // inch always > 0 float16x4_t _sum0 = _bias0; for (int q = 0; q < nn; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = 
vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; tmpptr += 8; } vst1_f16(outptr0, _sum0); outptr0 += 4; } } } static void convolution_im2col_sgemm_transform_kernel_pack8to4_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8b-8a-maxk-inch/8a-outch/8b Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(64 * maxk, inch / 8, outch / 8 + (outch % 8) / 4, (size_t)2u); int q = 0; for (; q + 7 < outch; q += 8) { Mat g0 = kernel_tm.channel(q / 8); for (int p = 0; p + 7 < inch; p += 8) { __fp16* g00 = g0.row<__fp16>(p / 8); for (int k = 0; k < maxk; k++) { for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } for (; q + 3 < outch; q += 4) { Mat g0 = kernel_tm.channel(q / 8 + (q % 8) / 4); for (int p = 0; p + 7 < inch; p += 8) { __fp16* g00 = g0.row<__fp16>(p / 8); for (int k = 0; k < maxk; k++) { for (int i = 0; i < 8; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void convolution_im2col_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); __fp16* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 8; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _val0 = vld1q_f16(sptr); float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8); float16x8_t _val2 = vld1q_f16(sptr + stride_w * 16); float16x8_t _val3 = vld1q_f16(sptr + stride_w * 24); vst1q_f16(ptr, _val0); vst1q_f16(ptr + 8, _val1); vst1q_f16(ptr + 16, _val2); vst1q_f16(ptr + 24, _val3); sptr += stride_w * 32; ptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _val0 = vld1q_f16(sptr); float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8); vst1q_f16(ptr, _val0); vst1q_f16(ptr + 8, _val1); sptr += stride_w * 16; ptr += 16; } for (; j < outw; j++) { float16x8_t _val = vld1q_f16(sptr); vst1q_f16(ptr, _val); sptr += stride_w * 8; ptr += 8; } sptr += gap; } } } } } im2col_sgemm_pack8to4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
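// For reference, a scalar sketch (not the shipped kernel) of what one output
// position of the micro-kernel above computes: nn = inch * maxk accumulation
// steps (inch counted in pack8 groups, as in this file), each consuming one
// pack8 input row and one interleaved 8x4 kernel block, accumulating into a
// pack4 output.  It mirrors the intrinsics tail loop and, like the rest of
// the file, needs an fp16-capable AArch64 compiler.
static void sgemm_pack8to4_ref_1x1(__fp16* out, const __fp16* in, const __fp16* k, const __fp16* bias, int nn)
{
    __fp16 sum[4] = {bias[0], bias[1], bias[2], bias[3]};
    for (int q = 0; q < nn; q++)
    {
        for (int a = 0; a < 8; a++)      // 8 packed input lanes
            for (int b = 0; b < 4; b++)  // 4 packed output lanes
                sum[b] += in[a] * k[a * 4 + b];
        in += 8;  // next pack8 input row
        k += 32;  // next 8x4 kernel block
    }
    for (int b = 0; b < 4; b++)
        out[b] = sum[b];
}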
search_function.h
#include "support_classes.h" #include "visited_list_pool.h" using namespace std; struct TripleResult { priority_queue<pair<float, int > > topk; int hops; int dist_calc; int degree; }; void makeStep(vector <uint32_t> &graph_level, const float *query, const float* db, priority_queue<pair<float, int > > &topResults, priority_queue<std::pair<float, int > > &candidateSet, Metric *metric, uint32_t d, int &query_dist_calc, bool &found, int &ef, int &k, VisitedList *vl) { vl_type *massVisited = vl->mass; vl_type currentV = vl->curV; for (int j = 0; j < graph_level.size(); ++j) { int neig_num = graph_level[j]; if (massVisited[neig_num] != currentV) { massVisited[neig_num] = currentV; const float *neig_coord = db + neig_num * d; float dist = metric->Dist(query, neig_coord, d); query_dist_calc++; if (topResults.top().first > dist || topResults.size() < ef) { candidateSet.emplace(-dist, neig_num); found = true; topResults.emplace(dist, neig_num); if (topResults.size() > ef) topResults.pop(); } } } } TripleResult getOneSearchResults(const float *query, const float* db, uint32_t N, uint32_t d, vector<vector <uint32_t> > &main_graph, vector<vector <uint32_t> > &auxiliary_graph, int ef, int k, vector<uint32_t> &inter_points, Metric *metric, VisitedListPool *visitedlistpool, bool use_second_graph, bool llf, uint32_t hops_bound) { std::priority_queue<std::pair<float, int > > topResults; int query_dist_calc = 1; int num_hops = 0; for (int i = 0; i < inter_points.size(); ++i) { std::priority_queue<std::pair<float, int > > candidateSet; const float* start = db + inter_points[i]*d; float dist = metric->Dist(query, start, d); topResults.emplace(dist, inter_points[i]); candidateSet.emplace(-dist, inter_points[i]); VisitedList *vl = visitedlistpool->getFreeVisitedList(); vl_type *massVisited = vl->mass; vl_type currentV = vl->curV; massVisited[inter_points[i]] = currentV; while (!candidateSet.empty()) { std::pair<float, int> curr_el_pair = candidateSet.top(); if (-curr_el_pair.first > topResults.top().first) break; candidateSet.pop(); int curNodeNum = curr_el_pair.second; bool auxiliary_found = false; if (use_second_graph and num_hops < hops_bound) { vector <uint32_t> curAuxiliaryNodeNeighbors = auxiliary_graph[curNodeNum]; makeStep(curAuxiliaryNodeNeighbors, query, db, topResults, candidateSet, metric, d, query_dist_calc, auxiliary_found, ef, k, vl); } if (!(auxiliary_found * llf) or !use_second_graph) { vector <uint32_t> curMainNodeNeighbors = main_graph[curNodeNum]; makeStep(curMainNodeNeighbors, query, db, topResults, candidateSet, metric, d, query_dist_calc, auxiliary_found, ef, k, vl); } num_hops++; } visitedlistpool->releaseVisitedList(vl); } while (topResults.size() > k) { topResults.pop(); } TripleResult ans{topResults, num_hops, query_dist_calc}; return ans; } int getRealNearest(const float* point_q, int k, int d, int d_low, priority_queue<pair<float, int > > &topk, vector<float> &ds, Metric *metric) { const float* point_i = ds.data() + d * topk.top().second; float min_dist = metric->Dist(point_i, point_q, d); int real_topk = topk.top().second; topk.pop(); float dist; while (!topk.empty()) { point_i = ds.data() + d * topk.top().second; dist = metric->Dist(point_i, point_q, d); if (dist < min_dist) { min_dist = dist; real_topk = topk.top().second; } topk.pop(); } return real_topk; } void performTest(vector<vector<uint32_t> > &knn_graph, vector<vector<uint32_t> > &kl_graph, vector<float> &ds, vector<float> &queries, vector<float> &ds_low, vector<float> &queries_low, vector<uint32_t> &truth, int n, int d, 
int d_low, int n_q, int n_tr, int ef, int k, string graph_name, Metric *metric, const char* output_txt, vector<vector<uint32_t> > inter_points, bool use_second_graph, bool llf, uint32_t hops_bound, int dist_calc_boost, int recheck_size, int number_exper, int number_of_threads) { std::ofstream outfile; outfile.open(output_txt, std::ios_base::app); VisitedListPool *visitedlistpool = new VisitedListPool(1, n); int hops = 0; int dist_calc = 0 + dist_calc_boost * n_q; float acc = 0; float work_time = 0; int num_exp = 0; omp_set_num_threads(number_of_threads); for (int v = 0; v < number_exper; ++v) { num_exp += 1; vector<uint32_t> ans(n_q); StopW stopw = StopW(); #pragma omp parallel for for (int i = 0; i < n_q; ++i) { TripleResult tripletResult; const float* point_q = queries.data() + i * d; const float* point_q_low = queries_low.data() + i * d_low; if (d != d_low) { if (recheck_size > 0) { tripletResult = getOneSearchResults(point_q_low, ds_low.data(), n, d_low, knn_graph, kl_graph, recheck_size, recheck_size, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); ans[i] = getRealNearest(point_q, k, d, d_low, tripletResult.topk, ds, metric); dist_calc += recheck_size; } else { tripletResult = getOneSearchResults(point_q_low, ds_low.data(), n, d_low, knn_graph, kl_graph, ef, k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); while (tripletResult.topk.size() > k) { tripletResult.topk.pop(); } ans[i] = tripletResult.topk.top().second; } } else { tripletResult = getOneSearchResults(point_q, ds.data(), n, d, knn_graph, kl_graph, ef, k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); while (tripletResult.topk.size() > k) { tripletResult.topk.pop(); } ans[i] = tripletResult.topk.top().second; } hops += tripletResult.hops; dist_calc += tripletResult.dist_calc; } work_time += stopw.getElapsedTimeMicro(); int print = 0; for (int i = 0; i < n_q; ++i) { acc += ans[i] == truth[i * n_tr]; // part for SIFT dataset bug fixing { const float *point_tr_f = ds.data() + d * truth[i * n_tr]; const float *point_tr_s = ds.data() + d * truth[i * n_tr + 1]; // point_tr_t = ds.data() + d * truth[i * n_tr + 2]; float dist = metric->Dist(point_tr_f, point_tr_s, d); if (dist == 0 and truth[i * n_tr] != truth[i * n_tr + 1]) { acc += ans[i] == truth[i * n_tr + 1]; } } } } cout << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc " << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl; outfile << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc " << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl; } void performSyntheticTests(int n, int d, int n_q, int n_tr, std::mt19937 random_gen, vector< vector<uint32_t> > &knn, vector< vector<uint32_t> > &kl, vector<float> &db, vector<float> &queries, vector<uint32_t> &truth, const char* output_txt, Metric *metric, string graph_name, bool use_second_graph, bool llf, bool beam_search) { vector<vector<uint32_t> > inter_points(n_q); int num = 0; // uniform_int_distribution<int> uniform_distr(0, n-1); // for (int j=0; j < n_q; ++j) { // num = uniform_distr(random_gen); // inter_points[j].push_back(num); // } vector<int> ef_coeff; vector<int> k_coeff; uint32_t hops_bound = 11; int recheck_size = -1; int knn_size = findGraphAverageDegree(knn); if (beam_search) { vector<int> k_coeff_{knn_size, 
knn_size, knn_size, knn_size, knn_size, knn_size}; k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end()); } else { vector<int> ef_coeff_{1, 1, 1, 1, 1, 1}; ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end()); } if (d == 3) { if (beam_search) { vector<int> ef_coeff_{10, 15, 20, 25, 30}; ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end()); } else { vector<int> k_coeff_{12, 14, 16, 18, 20}; k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end()); } hops_bound = 11; } else if (d == 5) { if (beam_search) { vector<int> ef_coeff_{7, 10, 15, 22, 25, 30}; ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end()); } else { vector<int> k_coeff_{15, 20, 25, 30, 40, 60}; k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end()); } hops_bound = 7; } else if (d == 9) { if (beam_search) { vector<int> ef_coeff_{5, 8, 15, 25, 30, 35}; ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end()); } else { vector<int> k_coeff_{60, 100, 150, 200, 250, 300}; k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end()); } hops_bound = 5; } else if (d == 17) { if (beam_search) { vector<int> ef_coeff_{10, 40, 70, 100, 130, 160}; ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end()); } else { vector<int> k_coeff_{750, 1000, 1250, 1500, 1750, 2000}; k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end()); } hops_bound = 4; } int exp_size = min(ef_coeff.size(), k_coeff.size()); for (int i=0; i < exp_size; ++i) { vector< vector <uint32_t>> knn_cur = cutKNNbyK(knn, db.data(), k_coeff[i], n, d, metric); performTest(knn_cur, kl, db, queries, db, queries, truth, n, d, d, n_q, n_tr, ef_coeff[i], 1, graph_name, metric, output_txt, inter_points, use_second_graph, llf, hops_bound, 0, recheck_size, 1, omp_get_max_threads()); } } void performRealTests(int n, int d, int d_low, int n_q, int n_tr, vector<int> efs, std::mt19937 random_gen, vector< vector<uint32_t> > &main_graph, vector< vector<uint32_t> > &kl, vector<float> &db, vector<float> &queries, vector<float> &db_low, vector<float> &queries_low, vector<uint32_t> &truth, const char* output_txt, Metric *metric, string graph_name, bool use_second_graph, bool llf, int number_exper, int number_of_threads) { vector<vector<uint32_t> > inter_points(n_q); int inter_points_mult = 1; if (graph_name.substr(0, 4) == "hnsw") { inter_points_mult = 0; // HNSW starts from 0 } int num = 0; uniform_int_distribution<int> uniform_distr(0, n-1); for (int j=0; j < n_q; ++j) { num = uniform_distr(random_gen); inter_points[j].push_back(num * inter_points_mult); } uint32_t hops_bound = 50; for (int i=0; i < efs.size(); ++i) { performTest(main_graph, kl, db, queries, db_low, queries_low, truth, n, d, d_low, n_q, n_tr, efs[i], 1, graph_name, metric, output_txt, inter_points, use_second_graph, llf, hops_bound, 0, efs[i], number_exper, number_of_threads); } } void performNetTest(vector<vector<uint32_t> > &knn_graph, vector<vector<uint32_t> > &kl_graph, vector<float> &ds, vector<float> &queries, vector<float> &ds_low, const Net* net, size_t d_hidden, vector<uint32_t> &truth, int n, int d, int d_low, int n_q, int n_tr, int ef, int k, string graph_name, Metric *metric, const char* output_txt, vector<vector<uint32_t> > inter_points, bool use_second_graph, bool llf, uint32_t hops_bound, int dist_calc_boost, int recheck_size, int number_exper, int number_of_threads) { std::ofstream outfile; outfile.open(output_txt, std::ios_base::app); VisitedListPool *visitedlistpool = new VisitedListPool(1, n); int hops = 0; int 
dist_calc = 0 + dist_calc_boost * n_q; float acc = 0; float work_time = 0; int num_exp = 0; // int d_hidden = 256; Angular ang = Angular(); L2Metric l2 = L2Metric(); vector<float> zeros(d_low); omp_set_num_threads(number_of_threads); for (int v = 0; v < number_exper; ++v) { num_exp += 1; vector<uint32_t> ans(n_q); StopW stopw = StopW(); //#pragma omp parallel for for (int i = 0; i < n_q; ++i) { TripleResult tripletResult; const float* point_q = queries.data() + i * d; // const float* point_q_low = queries_low.data() + i * d_low; if (d != d_low) { vector<float> queries_low(d_low); GetLowQueryFromNet(net, point_q, queries_low, zeros.data(), d, d_hidden, d_hidden, d_low, &ang, &l2); // const float* point_q_low = queries_low.data(); if (recheck_size > 0) { tripletResult = getOneSearchResults(queries_low.data(), ds_low.data(), n, d_low, knn_graph, kl_graph, recheck_size, recheck_size, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); ans[i] = getRealNearest(point_q, k, d, d_low, tripletResult.topk, ds, metric); dist_calc += recheck_size; } else { tripletResult = getOneSearchResults(queries_low.data(), ds_low.data(), n, d_low, knn_graph, kl_graph, ef, k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); while (tripletResult.topk.size() > k) { tripletResult.topk.pop(); } ans[i] = tripletResult.topk.top().second; } } else { tripletResult = getOneSearchResults(point_q, ds.data(), n, d, knn_graph, kl_graph, ef, k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound); while (tripletResult.topk.size() > k) { tripletResult.topk.pop(); } ans[i] = tripletResult.topk.top().second; } hops += tripletResult.hops; dist_calc += tripletResult.dist_calc; } work_time += stopw.getElapsedTimeMicro(); int print = 0; for (int i = 0; i < n_q; ++i) { acc += ans[i] == truth[i * n_tr]; // part for SIFT dataset bug fixing { const float *point_tr_f = ds.data() + d * truth[i * n_tr]; const float *point_tr_s = ds.data() + d * truth[i * n_tr + 1]; float dist = metric->Dist(point_tr_f, point_tr_s, d); if (dist == 0 and truth[i * n_tr] != truth[i * n_tr + 1]) { acc += ans[i] == truth[i * n_tr + 1]; } } } } cout << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc " << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl; outfile << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc " << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl; } void performRealNetTests(int n, int d, int d_low, int n_q, int n_tr, vector<int> efs, std::mt19937 random_gen, vector< vector<uint32_t> > &main_graph, vector< vector<uint32_t> > &kl, vector<float> &db, vector<float> &queries, vector<float> &db_low, const Net* net, size_t d_hidden, vector<uint32_t> &truth, const char* output_txt, Metric *metric, string graph_name, bool use_second_graph, bool llf, int number_exper, int number_of_threads) { vector<vector<uint32_t> > inter_points(n_q); int inter_points_mult = 1; if (graph_name.substr(0, 4) == "hnsw") { inter_points_mult = 0; // HNSW starts from 0 } int num = 0; uniform_int_distribution<int> uniform_distr(0, n-1); for (int j=0; j < n_q; ++j) { num = uniform_distr(random_gen); inter_points[j].push_back(num * inter_points_mult); } uint32_t hops_bound = 50; for (int i=0; i < efs.size(); ++i) { performNetTest(main_graph, kl, db, queries, db_low, net, d_hidden, truth, 
n, d, d_low, n_q, n_tr, efs[i], 1, graph_name, metric, output_txt, inter_points, use_second_graph, llf, hops_bound, 0, efs[i], number_exper, number_of_threads); } }
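// Note on the two heaps in getOneSearchResults above: topResults orders by
// true distance, so top() is the current worst of the best-ef results, while
// candidateSet stores negated distances so the default max-heap pops the
// closest unexpanded node first (hence the -curr_el_pair.first comparison).
// A self-contained sketch of that pattern, with made-up distances:

#include <cstdio>
#include <queue>
#include <utility>

int main() {
    std::priority_queue<std::pair<float, int> > topResults, candidateSet;
    const float dists[3] = {0.9f, 0.2f, 0.5f};   // hypothetical distances
    for (int id = 0; id < 3; ++id) {
        topResults.emplace(dists[id], id);       // max-heap on distance
        candidateSet.emplace(-dists[id], id);    // negated -> closest on top
    }
    std::printf("worst kept result: id %d (dist %.1f)\n",
                topResults.top().second, topResults.top().first);      // id 0, 0.9
    std::printf("next to expand:    id %d (dist %.1f)\n",
                candidateSet.top().second, -candidateSet.top().first); // id 1, 0.2
    return 0;
}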
GB_unop__identity_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint32_int64
// op(A') function:  GB_unop_tran__identity_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    int64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */  \
    uint32_t z = (uint32_t) aij ;    \
    Cx [pC] = z ;                    \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint32_int64
(
    uint32_t *Cx,                   // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
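// Stripped of the code-generation macros, the non-bitmap apply kernel above
// reduces to a single typecast loop.  A minimal free-standing sketch
// (hypothetical function name, not part of GraphBLAS):

#include <stdint.h>

void identity_uint32_from_int64
(
    uint32_t *Cx,
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (uint32_t) Ax [p] ;    // the cast is the whole operator
    }
}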
wavelet.c
/* Copyright 2014. The Regents of the University of California. * Copyright 2017. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013 Frank Ong <uecker@eecs.berkeley.edu> * 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de> */ /* * md_*-based multi-dimensional wavelet implementation * * - 3 levels (1d, md, md-hierarchical) * - all higher-level code should work for GPU as well * * Bugs: * * - GPU version is not optimized * - memory use could possible be reduced * * Missing: * * - different boundary conditions * (symmetric, periodic, zero) */ #include <stdio.h> #include <complex.h> #include <assert.h> #include <limits.h> #include <stdbool.h> #include "misc/misc.h" #include "misc/debug.h" #include "num/flpmath.h" #include "num/multind.h" #include "num/ops.h" #ifdef USE_CUDA #include "num/gpuops.h" #include "wavelet/wl3-cuda.h" #endif #include "wavelet.h" // layer 1 - 1-dimensional wavelet transform static unsigned int bandsize(unsigned int imsize, unsigned int flen) { return (imsize + flen - 1) / 2; } static complex float* access(const long str[3], complex float* x, long i, long j, long k) { return (void*)x + str[2] * i + str[1] * j + str[0] * k; } static const complex float* caccess(const long str[3], const complex float* x, long i, long j, long k) { return (const void*)x + str[2] * i + str[1] * j + str[0] * k; } static int coord(int l, int x, int flen, int k) { int n = 2 * l + 1 - (flen - 1) + k; if (n < 0) n = -n - 1; if (n >= x) n = x - 1 - (n - x); return n; } static void wavelet_down3(const long dims[3], const long out_str[3], complex float* out, const long in_str[3], const complex float* in, unsigned int flen, const float filter[flen]) { #pragma omp parallel for collapse(3) for (unsigned int i = 0; i < dims[2]; i++) for (unsigned int j = 0; j < bandsize(dims[1], flen); j++) for (unsigned int k = 0; k < dims[0]; k++) { *access(out_str, out, i, j, k) = 0.; for (unsigned int l = 0; l < flen; l++) { int n = coord(j, dims[1], flen, l); *access(out_str, out, i, j, k) += *(caccess(in_str, in, i, n, k)) * filter[flen - l - 1]; } } } static void wavelet_up3(const long dims[3], const long out_str[3], complex float* out, const long in_str[3], const complex float* in, unsigned int flen, const float filter[flen]) { // md_clear2(3, dims, out_str, out, CFL_SIZE); #pragma omp parallel for collapse(3) for (unsigned int i = 0; i < dims[2]; i++) for (unsigned int j = 0; j < dims[1]; j++) for (unsigned int k = 0; k < dims[0]; k++) { // *access(out_str, out, i, j, k) = 0.; for (unsigned int l = ((j + flen / 2 - 0) - (flen - 1)) % 2; l < flen; l += 2) { int n = ((j + flen / 2 - 0) - (flen - 1) + l) / 2; if ((0 <= n) && ((unsigned int)n < bandsize(dims[1], flen))) *access(out_str, out, i, j, k) += *caccess(in_str, in, i, n, k) * filter[flen - l - 1]; } } } void fwt1(unsigned int N, unsigned int d, const long dims[N], const long ostr[N], complex float* low, complex float* hgh, const long istr[N], const complex float* in, const long flen, const float filter[2][2][flen]) { debug_printf(DP_DEBUG4, "fwt1: %d/%d\n", d, N); debug_print_dims(DP_DEBUG4, N, dims); assert(dims[d] >= 2); long odims[N]; md_copy_dims(N, odims, dims); odims[d] = bandsize(dims[d], flen); debug_print_dims(DP_DEBUG4, N, odims); long o = d + 1; long u = N - o; // 0 1 2 3 4 5 6|7 // --d-- * --u--|N // ---o--- assert(d == md_calc_blockdim(d, dims + 0, istr + 0, CFL_SIZE)); assert(u == md_calc_blockdim(u, dims + o, istr + o, 
CFL_SIZE * md_calc_size(o, dims))); assert(d == md_calc_blockdim(d, odims + 0, ostr + 0, CFL_SIZE)); assert(u == md_calc_blockdim(u, odims + o, ostr + o, CFL_SIZE * md_calc_size(o, odims))); // merge dims long wdims[3] = { md_calc_size(d, dims), dims[d], md_calc_size(u, dims + o) }; long wistr[3] = { CFL_SIZE, istr[d], CFL_SIZE * md_calc_size(o, dims) }; long wostr[3] = { CFL_SIZE, ostr[d], CFL_SIZE * md_calc_size(o, odims) }; #ifdef USE_CUDA if (cuda_ondevice(in)) { assert(cuda_ondevice(low)); assert(cuda_ondevice(hgh)); float* flow = md_gpu_move(1, MD_DIMS(flen), filter[0][0], FL_SIZE); float* fhgh = md_gpu_move(1, MD_DIMS(flen), filter[0][1], FL_SIZE); wl3_cuda_down3(wdims, wostr, low, wistr, in, flen, flow); wl3_cuda_down3(wdims, wostr, hgh, wistr, in, flen, fhgh); md_free(flow); md_free(fhgh); return; } #endif // no clear needed wavelet_down3(wdims, wostr, low, wistr, in, flen, filter[0][0]); wavelet_down3(wdims, wostr, hgh, wistr, in, flen, filter[0][1]); } void iwt1(unsigned int N, unsigned int d, const long dims[N], const long ostr[N], complex float* out, const long istr[N], const complex float* low, const complex float* hgh, const long flen, const float filter[2][2][flen]) { debug_printf(DP_DEBUG4, "ifwt1: %d/%d\n", d, N); debug_print_dims(DP_DEBUG4, N, dims); assert(dims[d] >= 2); long idims[N]; md_copy_dims(N, idims, dims); idims[d] = bandsize(dims[d], flen); debug_print_dims(DP_DEBUG4, N, idims); long o = d + 1; long u = N - o; // 0 1 2 3 4 5 6|7 // --d-- * --u--|N // ---o--- assert(d == md_calc_blockdim(d, dims + 0, ostr + 0, CFL_SIZE)); assert(u == md_calc_blockdim(u, dims + o, ostr + o, CFL_SIZE * md_calc_size(o, dims))); assert(d == md_calc_blockdim(d, idims + 0, istr + 0, CFL_SIZE)); assert(u == md_calc_blockdim(u, idims + o, istr + o, CFL_SIZE * md_calc_size(o, idims))); long wdims[3] = { md_calc_size(d, dims), dims[d], md_calc_size(u, dims + o) }; long wistr[3] = { CFL_SIZE, istr[d], CFL_SIZE * md_calc_size(o, idims) }; long wostr[3] = { CFL_SIZE, ostr[d], CFL_SIZE * md_calc_size(o, dims) }; md_clear(3, wdims, out, CFL_SIZE); // we cannot clear because we merge outputs #ifdef USE_CUDA if (cuda_ondevice(out)) { assert(cuda_ondevice(low)); assert(cuda_ondevice(hgh)); float* flow = md_gpu_move(1, MD_DIMS(flen), filter[1][0], FL_SIZE); float* fhgh = md_gpu_move(1, MD_DIMS(flen), filter[1][1], FL_SIZE); wl3_cuda_up3(wdims, wostr, out, wistr, low, flen, flow); wl3_cuda_up3(wdims, wostr, out, wistr, hgh, flen, fhgh); md_free(flow); md_free(fhgh); return; } #endif wavelet_up3(wdims, wostr, out, wistr, low, flen, filter[1][0]); wavelet_up3(wdims, wostr, out, wistr, hgh, flen, filter[1][1]); } // layer 2 - multi-dimensional wavelet transform static void wavelet_dims_r(unsigned int N, unsigned int n, unsigned int flags, long odims[2 * N], const long dims[N], const long flen) { if (MD_IS_SET(flags, n)) { odims[0 + n] = bandsize(dims[n], flen); odims[N + n] = 2; } if (n > 0) wavelet_dims_r(N, n - 1, flags, odims, dims, flen); } void wavelet_dims(unsigned int N, unsigned int flags, long odims[2 * N], const long dims[N], const long flen) { md_copy_dims(N, odims, dims); md_singleton_dims(N, odims + N); wavelet_dims_r(N, N - 1, flags, odims, dims, flen); } void fwtN(unsigned int N, unsigned int flags, const long shifts[N], const long dims[N], const long ostr[2 * N], complex float* out, const long istr[N], const complex float* in, const long flen, const float filter[2][2][flen]) { long odims[2 * N]; wavelet_dims(N, flags, odims, dims, flen); assert(md_calc_size(2 * N, odims) >= 
md_calc_size(N, dims)); // FIXME one of these is unnecessary if we use the output complex float* tmpA = md_alloc_sameplace(2 * N, odims, CFL_SIZE, out); complex float* tmpB = md_alloc_sameplace(2 * N, odims, CFL_SIZE, out); long tidims[2 * N]; md_copy_dims(N, tidims, dims); md_singleton_dims(N, tidims + N); long tistrs[2 * N]; md_calc_strides(2 * N, tistrs, tidims, CFL_SIZE); long todims[2 * N]; md_copy_dims(2 * N, todims, tidims); long tostrs[2 * N]; // maybe we should push the randshift into lower levels //md_copy2(N, dims, tistrs, tmpA, istr, in, CFL_SIZE); md_circ_shift2(N, dims, shifts, tistrs, tmpA, istr, in, CFL_SIZE); for (unsigned int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { todims[0 + i] = odims[0 + i]; todims[N + i] = odims[N + i]; md_calc_strides(2 * N, tostrs, todims, CFL_SIZE); fwt1(2 * N, i, tidims, tostrs, tmpB, (void*)tmpB + tostrs[N + i], tistrs, tmpA, flen, filter); md_copy_dims(2 * N, tidims, todims); md_copy_dims(2 * N, tistrs, tostrs); complex float* swap = tmpA; tmpA = tmpB; tmpB = swap; } } md_copy2(2 * N, todims, ostr, out, tostrs, tmpA, CFL_SIZE); md_free(tmpA); md_free(tmpB); } void iwtN(unsigned int N, unsigned int flags, const long shifts[N], const long dims[N], const long ostr[N], complex float* out, const long istr[2 * N], const complex float* in, const long flen, const float filter[2][2][flen]) { long idims[2 * N]; wavelet_dims(N, flags, idims, dims, flen); assert(md_calc_size(2 * N, idims) >= md_calc_size(N, dims)); complex float* tmpA = md_alloc_sameplace(2 * N, idims, CFL_SIZE, out); complex float* tmpB = md_alloc_sameplace(2 * N, idims, CFL_SIZE, out); long tidims[2 * N]; md_copy_dims(2 * N, tidims, idims); long tistrs[2 * N]; md_calc_strides(2 * N, tistrs, tidims, CFL_SIZE); long todims[2 * N]; md_copy_dims(2 * N, todims, tidims); long tostrs[2 * N]; long ishifts[N]; for (unsigned int i = 0; i < N; i++) ishifts[i] = -shifts[i]; md_copy2(2 * N, tidims, tistrs, tmpA, istr, in, CFL_SIZE); for (int i = N - 1; i >= 0; i--) { // run backwards to maintain contigous blocks if (MD_IS_SET(flags, i)) { todims[0 + i] = dims[0 + i]; todims[N + i] = 1; md_calc_strides(2 * N, tostrs, todims, CFL_SIZE); iwt1(2 * N, i, todims, tostrs, tmpB, tistrs, tmpA, (void*)tmpA + tistrs[N + i], flen, filter); md_copy_dims(2 * N, tidims, todims); md_copy_dims(2 * N, tistrs, tostrs); complex float* swap = tmpA; tmpA = tmpB; tmpB = swap; } } //md_copy2(N, dims, ostr, out, tostrs, tmpA, CFL_SIZE); md_circ_shift2(N, dims, ishifts, ostr, out, tostrs, tmpA, CFL_SIZE); md_free(tmpA); md_free(tmpB); } // layer 3 - hierarchical multi-dimensional wavelet transform static long wavelet_filter_flags(unsigned int N, long flags, const long dims[N], const long min[N]) { for (unsigned int i = 0; i < N; i++) if (dims[i] < min[i]) // CHECK flags = MD_CLEAR(flags, i); return flags; } long wavelet_num_levels(unsigned int N, unsigned int flags, const long dims[N], const long min[N], const long flen) { if (0 == flags) return 1; long wdims[2 * N]; wavelet_dims(N, flags, wdims, dims, flen); return 1 + wavelet_num_levels(N, wavelet_filter_flags(N, flags, wdims, min), wdims, min, flen); } static long wavelet_coeffs_r(unsigned int levels, unsigned int N, unsigned int flags, const long dims[N], const long min[N], const long flen) { long wdims[2 * N]; wavelet_dims(N, flags, wdims, dims, flen); long coeffs = md_calc_size(N, wdims); long bands = md_calc_size(N, wdims + N); assert((0 == flags) == (0 == levels)); if (0 == flags) return bands * coeffs; return coeffs * (bands - 1) + wavelet_coeffs_r(levels - 1, 
N, wavelet_filter_flags(N, flags, wdims, min), wdims, min, flen); } long wavelet_coeffs(unsigned int N, unsigned int flags, const long dims[N], const long min[N], const long flen) { unsigned int levels = wavelet_num_levels(N, flags, dims, min, flen); assert(levels > 0); return wavelet_coeffs_r(levels - 1, N, flags, dims, min, flen); } void wavelet_thresh(unsigned int N, float lambda, unsigned int flags, unsigned int jflags, const long shifts[N], const long dims[N], complex float* out, const complex float* in, const long minsize[N], long flen, const float filter[2][2][flen]) { assert(0 == (flags & jflags)); long wdims[N]; wavelet_coeffs2(N, flags, wdims, dims, minsize, flen); long wstr[N]; md_calc_strides(N, wstr, wdims, CFL_SIZE); complex float* tmp = md_alloc_sameplace(N, wdims, CFL_SIZE, out); long str[N]; md_calc_strides(N, str, dims, CFL_SIZE); fwt2(N, flags, shifts, wdims, wstr, tmp, dims, str, in, minsize, flen, filter); md_zsoftthresh(N, wdims, lambda, jflags, tmp, tmp); iwt2(N, flags, shifts, dims, str, out, wdims, wstr, tmp, minsize, flen, filter); md_free(tmp); } void wavelet_coeffs2(unsigned int N, unsigned int flags, long odims[N], const long dims[N], const long min[N], const long flen) { md_select_dims(N, ~flags, odims, dims); if (0 == flags) return; unsigned int levels = wavelet_num_levels(N, flags, dims, min, flen); assert(levels > 0); long wdims[N]; md_select_dims(N, flags, wdims, dims); // remove unmodified dims unsigned int b = ffs(flags) - 1; odims[b] = wavelet_coeffs_r(levels - 1, N, flags, wdims, min, flen); } static bool wavelet_check_dims(unsigned int N, unsigned int flags, const long dims[N], const long minsize[N]) { for (unsigned int i = 0; i < N; i++) if (MD_IS_SET(flags, i)) if ((minsize[i] <= 2) || (dims[i] < minsize[i])) return false; return true; } static void embed(unsigned int N, unsigned int flags, long ostr[N], const long dims[N], const long str[N]) { unsigned int b = ffs(flags) - 1; long dims1[N]; md_select_dims(N, flags, dims1, dims); md_calc_strides(N, ostr, dims1, str[b]); for (unsigned int i = 0; i < N; i++) if (!MD_IS_SET(flags, i)) ostr[i] = str[i]; } void fwt2(unsigned int N, unsigned int flags, const long shifts[N], const long odims[N], const long ostr[N], complex float* out, const long idims[N], const long istr[N], const complex float* in, const long minsize[N], long flen, const float filter[2][2][flen]) { assert(wavelet_check_dims(N, flags, idims, minsize)); if (0 == flags) { // note: recursion does *not* end here assert(md_check_compat(N, 0u, odims, idims)); md_copy2(N, idims, ostr, out, istr, in, CFL_SIZE); return; } // check output dimensions long odims2[N]; wavelet_coeffs2(N, flags, odims2, idims, minsize, flen); assert(md_check_compat(N, 0u, odims2, odims)); long wdims2[2 * N]; wavelet_dims(N, flags, wdims2, idims, flen); // only consider transform dims... long dims1[N]; md_select_dims(N, flags, dims1, idims); long wdims[2 * N]; wavelet_dims(N, flags, wdims, dims1, flen); long level_coeffs = md_calc_size(2 * N, wdims); // ... 
which get embedded in dimension b unsigned int b = ffs(flags) - 1; long ostr2[2 * N]; md_calc_strides(2 * N, ostr2, wdims, ostr[b]); // merge with original strides for (unsigned int i = 0; i < N; i++) if (!MD_IS_SET(flags, i)) ostr2[i] = ostr[i]; assert(odims[b] >= level_coeffs); long offset = (odims[b] - level_coeffs) * (ostr[b] / CFL_SIZE); long bands = md_calc_size(N, wdims + N); long coeffs = md_calc_size(N, wdims + 0); debug_printf(DP_DEBUG4, "fwt2: flags:%d lcoeffs:%ld coeffs:%ld (space:%ld) bands:%ld str:%ld off:%ld\n", flags, level_coeffs, coeffs, odims2[b], bands, ostr[b], offset / istr[b]); // subtract coefficients in high band odims2[b] -= (bands - 1) * coeffs; assert(odims2[b] > 0); long shifts0[N]; for (unsigned int i = 0; i < N; i++) shifts0[i] = 0; unsigned int flags2 = wavelet_filter_flags(N, flags, wdims, minsize); assert((0 == offset) == (0u == flags2)); fwtN(N, flags, shifts, idims, ostr2, out + offset, istr, in, flen, filter); if (0 != flags2) { long odims3[N]; wavelet_coeffs2(N, flags2, odims3, wdims2, minsize, flen); long ostr3[N]; embed(N, flags, ostr3, odims3, ostr); fwt2(N, flags2, shifts0, odims3, ostr3, out, wdims2, ostr2, out + offset, minsize, flen, filter); } } void iwt2(unsigned int N, unsigned int flags, const long shifts[N], const long odims[N], const long ostr[N], complex float* out, const long idims[N], const long istr[N], const complex float* in, const long minsize[N], const long flen, const float filter[2][2][flen]) { assert(wavelet_check_dims(N, flags, odims, minsize)); if (0 == flags) { // note: recursion does *not* end here assert(md_check_compat(N, 0u, odims, idims)); md_copy2(N, idims, ostr, out, istr, in, CFL_SIZE); return; } // check input dimensions long idims2[N]; wavelet_coeffs2(N, flags, idims2, odims, minsize, flen); assert(md_check_compat(N, 0u, idims2, idims)); long wdims2[2 * N]; wavelet_dims(N, flags, wdims2, odims, flen); // only consider transform dims... long dims1[N]; md_select_dims(N, flags, dims1, odims); long wdims[2 * N]; wavelet_dims(N, flags, wdims, dims1, flen); long level_coeffs = md_calc_size(2 * N, wdims); // ... 
which get embedded in dimension b unsigned int b = ffs(flags) - 1; long istr2[2 * N]; md_calc_strides(2 * N, istr2, wdims, istr[b]); // merge with original strides for (unsigned int i = 0; i < N; i++) if (!MD_IS_SET(flags, i)) istr2[i] = istr[i]; assert(idims[b] >= level_coeffs); long offset = (idims[b] - level_coeffs) * (istr[b] / CFL_SIZE); long bands = md_calc_size(N, wdims + N); long coeffs = md_calc_size(N, wdims + 0); // subtract coefficients in high band idims2[b] -= (bands - 1) * coeffs; assert(idims2[b] > 0); debug_printf(DP_DEBUG4, "ifwt2: flags:%d lcoeffs:%ld coeffs:%ld (space:%ld) bands:%ld str:%ld off:%ld\n", flags, level_coeffs, coeffs, idims2[b], bands, istr[b], offset / ostr[b]); // fix me we need temp storage complex float* tmp = md_alloc_sameplace(2 * N, wdims2, CFL_SIZE, out); long tstr[2 * N]; md_calc_strides(2 * N, tstr, wdims2, CFL_SIZE); md_copy2(2 * N, wdims2, tstr, tmp, istr2, in + offset, CFL_SIZE); long shifts0[N]; for (unsigned int i = 0; i < N; i++) shifts0[i] = 0; unsigned int flags2 = wavelet_filter_flags(N, flags, wdims, minsize); assert((0 == offset) == (0u == flags2)); if (0u != flags2) { long idims3[N]; wavelet_coeffs2(N, flags2, idims3, wdims2, minsize, flen); long istr3[N]; embed(N, flags, istr3, idims3, istr); iwt2(N, flags2, shifts0, wdims2, tstr, tmp, idims3, istr3, in, minsize, flen, filter); } iwtN(N, flags, shifts, odims, ostr, out, tstr, tmp, flen, filter); md_free(tmp); } void fwt(unsigned int N, unsigned int flags, const long shifts[N], const long odims[N], complex float* out, const long idims[N], const complex float* in, const long minsize[N], long flen, const float filter[2][2][flen]) { fwt2(N, flags, shifts, odims, MD_STRIDES(N, odims, CFL_SIZE), out, idims, MD_STRIDES(N, idims, CFL_SIZE), in, minsize, flen, filter); } void iwt(unsigned int N, unsigned int flags, const long shifts[N], const long odims[N], complex float* out, const long idims[N], const complex float* in, const long minsize[N], const long flen, const float filter[2][2][flen]) { iwt2(N, flags, shifts, odims, MD_STRIDES(N, odims, CFL_SIZE), out, idims, MD_STRIDES(N, idims, CFL_SIZE), in, minsize, flen, filter); } const float wavelet_haar[2][2][2] = { { { +0.7071067811865475, +0.7071067811865475 }, { -0.7071067811865475, +0.7071067811865475 }, }, { { +0.7071067811865475, +0.7071067811865475 }, { +0.7071067811865475, -0.7071067811865475 }, }, }; const float wavelet_dau2[2][2][4] = { { { -0.1294095225512603, +0.2241438680420134, +0.8365163037378077, +0.4829629131445341 }, { -0.4829629131445341, +0.8365163037378077, -0.2241438680420134, -0.1294095225512603 }, }, { { +0.4829629131445341, +0.8365163037378077, +0.2241438680420134, -0.1294095225512603 }, { -0.1294095225512603, -0.2241438680420134, +0.8365163037378077, -0.4829629131445341 }, }, }; const float wavelet_cdf44[2][2][10] = { { { +0.00000000000000000, +0.03782845550726404 , -0.023849465019556843, -0.11062440441843718 , +0.37740285561283066, +0.85269867900889385, +0.37740285561283066 , -0.11062440441843718 , -0.023849465019556843, +0.03782845550726404 }, { +0.00000000000000000, -0.064538882628697058, +0.040689417609164058, +0.41809227322161724 , -0.7884856164055829, +0.41809227322161724, +0.040689417609164058, -0.064538882628697058, +0.00000000000000000 , +0.00000000000000000 }, }, { { +0.00000000000000000, -0.064538882628697058, -0.040689417609164058, +0.41809227322161724 , +0.7884856164055829, +0.41809227322161724, -0.040689417609164058, -0.064538882628697058, +0.000000000000000000, +0.00000000000000000 }, { 
+0.00000000000000000, -0.03782845550726404 , -0.023849465019556843, +0.11062440441843718 , +0.37740285561283066, -0.85269867900889385, +0.37740285561283066 , +0.11062440441843718 , -0.023849465019556843, -0.03782845550726404 }, }, };
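
wavelet_down3() above is a strided 3-D driver for a simple 1-D operation: convolve with a length-flen filter, keep every second output, and reflect out-of-range indices at the boundary via coord(). A minimal 1-D sketch of one analysis level, assuming the Haar pair from wavelet_haar (the md_* stride machinery and the GPU path are deliberately left out):

#include <stdio.h>

static unsigned int bandsize(unsigned int imsize, unsigned int flen)
{
    return (imsize + flen - 1) / 2;
}

static int coord(int l, int x, int flen, int k)
{
    int n = 2 * l + 1 - (flen - 1) + k;
    if (n < 0)  n = -n - 1;            /* mirror at the left edge */
    if (n >= x) n = x - 1 - (n - x);   /* mirror at the right edge */
    return n;
}

static void down1(int x, float *out, const float *in,
                  int flen, const float *filter)
{
    for (int j = 0; j < (int)bandsize(x, flen); j++) {
        out[j] = 0.f;
        for (int l = 0; l < flen; l++)
            out[j] += in[coord(j, x, flen, l)] * filter[flen - l - 1];
    }
}

int main(void)
{
    const float low[2] = { +0.70710678f, +0.70710678f };  /* wavelet_haar[0][0] */
    const float hgh[2] = { -0.70710678f, +0.70710678f };  /* wavelet_haar[0][1] */
    float in[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, lo[4], hi[4];

    down1(8, lo, in, 2, low);
    down1(8, hi, in, 2, hgh);
    for (int j = 0; j < 4; j++)  /* averages scaled by sqrt(2); details constant */
        printf("lo[%d]=%6.3f  hi[%d]=%6.3f\n", j, lo[j], j, hi[j]);
    return 0;
}

For flen = 2 the index formula reduces to n = 2j + l, i.e. plain pairwise filtering; the reflection branches in coord() only fire for longer filters such as wavelet_cdf44.
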
zgetrf.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * ******************************************************************************/ int plasma_zgetrf(int m, int n, plasma_complex64_t *pA, int lda, int *ipiv) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } if (m < 0) { plasma_error("illegal value of m"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -4; } // quick return if (imin(m, n) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_getrf(plasma, PlasmaComplexDouble, m, n); // Set tiling parameters. int nb = plasma->nb; // Initialize barrier. plasma_barrier_init(&plasma->barrier); // Create tile matrix. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); // Call the tile async function. plasma_omp_zgetrf(A, ipiv, &sequence, &request); } // If status/=0, matrix is singular don't copy back the factored matrix. if (sequence.status == 0) { // Translate back to LAPACK layout. #pragma omp parallel #pragma omp master { plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request); } } else { plasma_request_fail(&sequence, &request, imin(m,n) + sequence.status); } // Free matrix A in tile layout. plasma_desc_destroy(&A); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * ******************************************************************************/ void plasma_omp_zgetrf(plasma_desc_t A, int *ipiv, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (sequence == NULL) { plasma_fatal_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_fatal_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.m == 0 || A.n == 0) return; // Call the parallel function. plasma_pzgetrf(A, ipiv, sequence, request); }
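
plasma_zgetrf() above uses the "#pragma omp parallel" / "#pragma omp master" idiom: a single thread unfolds the tile algorithm (layout translation, then plasma_omp_zgetrf) while the whole team executes the tasks it spawns. A minimal sketch of that idiom with hypothetical "factor"/"update" stand-ins and explicit task dependences (the real dependences live inside plasma_pzgetrf, which is not shown here):

#include <stdio.h>

int main(void)
{
    enum { NT = 4 };                   /* number of "tiles" in the panel */
    double tile[NT] = { 1, 1, 1, 1 };

    #pragma omp parallel
    #pragma omp master
    {
        for (int k = 0; k < NT; k++) {
            #pragma omp task depend(inout: tile[k]) firstprivate(k)
            tile[k] *= 2;              /* stand-in for "factor panel k" */

            for (int i = k + 1; i < NT; i++) {
                #pragma omp task depend(in: tile[k]) depend(inout: tile[i]) \
                    firstprivate(i, k)
                tile[i] += tile[k];    /* stand-in for "update tile i with panel k" */
            }
        }
    }   /* implicit barrier of the parallel region waits for all tasks */

    for (int k = 0; k < NT; k++)
        printf("tile[%d] = %g\n", k, tile[k]);  /* 2 6 18 54 for any thread count */
    return 0;
}

The depend clauses give the runtime the same information a DAG scheduler needs: updates of step k may overlap each other and later panels, but never run before their panel is factored.
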
par_grid_hash.c
#include "par_grid_hash.h" int main(int argc, char* argv[]){ char* input_name; /**< Input data file name */ int generations = 0; /**< Number of generations to proccess */ int cube_size = 0; /**< Size of the 3D space */ GraphNode*** graph; /**< Graph representation - 2D array of lists */ Hashtable* hashtable; /**< Contains the information of nodes that are alive */ /* Iterator variables */ int g, i, j; GraphNode* g_it = NULL; Node* it = NULL; /* Lock variables */ omp_lock_t** graph_lock; parseArgs(argc, argv, &input_name, &generations); int initial_alive = getAlive(input_name); /* Create the hashtable */ hashtable = createHashtable(HASH_RATIO * initial_alive); graph = parseFile(input_name, hashtable, &cube_size); debug_print("Hashtable: Occupation %.1f, Average %.2f elements per bucket", (hashtable->occupied*1.0) / hashtable->size, (hashtable->elements*1.0) / hashtable->occupied); /* Initialize lock variables */ graph_lock = (omp_lock_t**)malloc(cube_size * sizeof(omp_lock_t*)); for(i = 0; i < cube_size; i++){ graph_lock[i] = (omp_lock_t*) malloc(cube_size * sizeof(omp_lock_t)); for(j = 0; j < cube_size; j++){ omp_init_lock(&(graph_lock[i][j])); } } double start = omp_get_wtime(); // Start Timer /* Generations */ for(g = 1; g <= generations; g++){ /* Convert hashtable to an array of currently alive nodes */ int num_alive = hashtable->elements; Node** vector = (Node**) malloc(sizeof(Node*) * num_alive); i = 0; for(j = 0; j < hashtable->size; j++){ for (it = hashtable->table[j]; it != NULL; it = it->next){ vector[i++] = it; } } /* Create the num_alive * 6 matrix that will store the neighbours of each alive node */ Node*** neighbour_vector = (Node***)malloc(sizeof(Node**) * num_alive); #pragma omp parallel for private(i, j) for(i = 0; i < num_alive; i++){ neighbour_vector[i] = (Node**)malloc(sizeof(Node*) * 6); for(j = 0; j < 6; j++){ neighbour_vector[i][j] = NULL; } } #pragma omp parallel for private(i, j) /* Notify each of the neighbours, inserting them in the graph if needed */ for (i = 0; i < num_alive; i++){ /* Get the coordinates from the alive node */ coordinate x = vector[i]->x; coordinate y = vector[i]->y; coordinate z = vector[i]->z; /* Calculate the coordinates of the 6 neighbours */ coordinate x1, x2, y1, y2, z1, z2; x1 = (x+1) % cube_size; x2 = (x-1) < 0 ? (cube_size-1) : (x-1); y1 = (y+1) % cube_size; y2 = (y-1) < 0 ? (cube_size-1) : (y-1); z1 = (z+1) % cube_size; z2 = (z-1) < 0 ? 
(cube_size-1) : (z-1); /* Auxiliary matrix of neighbour coordinates for simpler code */ coordinate c[6][3] = {{x1,y,z}, {x2,y,z}, {x,y1,z}, {x,y2,z}, {x,y,z1}, {x,y,z2}}; GraphNode* ptr; // Auxiliary GraphNode pointer to a newly inserted node /* When a node is inserted in the graph, a pointer to it is stored in the hashtable */ for(j = 0; j < 6; j++){ if(graphNodeAddNeighbour( &(graph [c[j][X]] [c[j][Y]]), c[j][Z], &ptr, &(graph_lock [c[j][X]] [c[j][Y]]))){ neighbour_vector[i][j] = nodeInsert(NULL, c[j][X], c[j][Y], c[j][Z], ptr); }else{ neighbour_vector[i][j] = NULL; } } } #pragma omp parallel for private(it, i, j) /* Determine the next state of each of the cells */ for(i = 0; i < num_alive; i++){ /* Process alive node in vector*/ it = vector[i]; unsigned char live_neighbours = it->ptr->neighbours; it->ptr->neighbours = 0; if(it->ptr->state == ALIVE){ if(live_neighbours < 2 || live_neighbours > 4){ it->ptr->state = DEAD; graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y])); hashtableRemove(hashtable, it->x, it->y, it->z); } } /* Process its neighbours */ for(j = 0; j < 6; j++){ it = neighbour_vector[i][j]; if(it != NULL){ unsigned char live_neighbours = it->ptr->neighbours; it->ptr->neighbours = 0; if(it->ptr->state == DEAD){ if(live_neighbours == 2 || live_neighbours == 3){ it->ptr->state = ALIVE; hashtableWrite(hashtable, it->x, it->y, it->z, it->ptr); } else{ graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y])); } } } } } /* Cleanup matrix and vector*/ for(i = 0; i < num_alive; i++){ for(j = 0; j < 6; j++){ free(neighbour_vector[i][j]); } free(neighbour_vector[i]); } free(neighbour_vector); free(vector); } double end = omp_get_wtime(); // Stop Timer /* Print the final set of live cells */ printAndSortActive(graph, cube_size); time_print(" %f\n", end - start); /* Free resources */ freeGraph(graph, cube_size); hashtableFree(hashtable); for(i = 0; i < cube_size; i++){ for(j=0; j<cube_size; j++){ omp_destroy_lock(&(graph_lock[i][j])); } } free(input_name); return 0; } /* Graph related functions */ GraphNode*** initGraph(int size){ int i,j; GraphNode*** graph = (GraphNode***) malloc(sizeof(GraphNode**) * size); for (i = 0; i < size; i++){ graph[i] = (GraphNode**) malloc(sizeof(GraphNode*) * size); for (j = 0; j < size; j++){ graph[i][j] = NULL; } } return graph; } void freeGraph(GraphNode*** graph, int size){ int i, j; if (graph != NULL){ for (i = 0; i < size; i++){ for (j = 0; j < size; j++){ graphNodeDelete(graph[i][j]); } free(graph[i]); } free(graph); } } void printAndSortActive(GraphNode*** graph, int cube_size){ int x,y; GraphNode* it; for (x = 0; x < cube_size; ++x){ for (y = 0; y < cube_size; ++y){ /* Sort the list by ascending coordinate z */ graphNodeSort(&(graph[x][y])); for (it = graph[x][y]; it != NULL; it = it->next){ if (it->state == ALIVE) out_print("%d %d %d\n", x, y, it->z); } } } } void printSortedGraphToFile(GraphNode*** graph, int cube_size, char* input_name, int generations){ int x,y; GraphNode* it; char* output_name = generateOuputFilename(input_name, generations); FILE* output = fopen(output_name, "w"); if (output == NULL){ err_print("Could not open output file"); exit(EXIT_FAILURE); } for (x = 0; x < cube_size; ++x){ for (y = 0; y < cube_size; ++y){ /* Sort the list by ascending coordinate z */ graphNodeSort(&(graph[x][y])); for (it = graph[x][y]; it != NULL; it = it->next){ fprintf(output, "%d %d %d\n", x, y, it->z); } } } free(output_name); fclose(output); } char* generateOuputFilename(char* input_name, int 
generations){ char generation[GEN_BUFFER_SIZE]; sprintf(generation, "%d", generations); char* in_ext = findLastDot(input_name); int aux_length = strlen(input_name) - strlen(in_ext); char* aux = malloc(sizeof(char) * (aux_length + 1)); strncpy(aux, input_name, aux_length); aux[aux_length] = '\0'; char* output_name = malloc(sizeof(char) * (aux_length + strlen(generation) + strlen(OUT_EXT)) + 3); sprintf(output_name, "%s.%s.%s", aux, generation, OUT_EXT); free(aux); return output_name; } char* findLastDot(char* str){ char *ptr = str; char *dot = NULL; while(*ptr++){ if (*ptr == '.') dot = ptr; } return dot; } /* File parsing functions */ void parseArgs(int argc, char* argv[], char** file, int* generations){ if (argc == 3){ char* file_name = malloc(sizeof(char) * (strlen(argv[1]) + 1)); strcpy(file_name, argv[1]); *file = file_name; *generations = atoi(argv[2]); if (*generations > 0 && file_name != NULL) return; } printf("Usage: %s [data_file.in] [number_generations]", argv[0]); exit(EXIT_FAILURE); } int getAlive(char* file){ FILE* fp = fopen(file, "r"); int alive_num = 0; if (fp == NULL){ return EXIT_FAILURE; } while(!feof(fp)){ if(fgetc(fp) == '\n'){ alive_num++; } } fclose(fp); return alive_num - 1; } GraphNode*** parseFile(char* input_name, Hashtable* hashtable, int* cube_size){ int first = 0; char line[BUFFER_SIZE]; int x, y, z; FILE* fp = fopen(input_name, "r"); if(fp == NULL){ err_print("Please input a valid file name"); exit(EXIT_FAILURE); } GraphNode*** graph; while(fgets(line, sizeof(line), fp)){ if(!first){ if(sscanf(line, "%d\n", cube_size) == 1){ first = 1; graph = initGraph(*cube_size); } }else{ if(sscanf(line, "%d %d %d\n", &x, &y, &z) == 3){ /* Insert live nodes in the graph and the update set */ graph[x][y] = graphNodeInsert(graph[x][y], z, ALIVE); hashtableWrite(hashtable, x, y, z, (GraphNode*)(graph[x][y])); } } } fclose(fp); return graph; }
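
The generation loop above protects each (x,y) column of the graph with its own omp_lock_t, so two threads touching the same cell serialize while the rest of the grid proceeds in parallel. A stripped-down sketch of that per-cell locking, with a plain counter standing in for the GraphNode list:

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
    int size = 64, n_cells = size * size;
    int *count = calloc((size_t)n_cells, sizeof *count);
    omp_lock_t *lock = malloc((size_t)n_cells * sizeof *lock);
    if (count == NULL || lock == NULL)
        return EXIT_FAILURE;

    for (int i = 0; i < n_cells; i++)
        omp_init_lock(&lock[i]);

    /* many iterations hammer the same cells; each lock serializes
       exactly one cell, like graph_lock[x][y] above */
    #pragma omp parallel for
    for (int n = 0; n < 100000; n++) {
        int cell = n % n_cells;
        omp_set_lock(&lock[cell]);
        count[cell]++;               /* stand-in for graphNodeAddNeighbour() */
        omp_unset_lock(&lock[cell]);
    }

    long total = 0;
    for (int i = 0; i < n_cells; i++) {
        total += count[i];
        omp_destroy_lock(&lock[i]);
    }
    printf("total %ld\n", total);    /* always exactly 100000 */

    free(count);
    free(lock);
    return 0;
}

Fine-grained locks like this scale with the number of distinct cells touched; a single global lock would serialize every neighbour update instead.
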
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB #ifdef ENABLE_TASK #if !defined(PFOR_TO_ALLATONCE) && !defined(PFOR_TO_BISECTION) && !defined(PFOR_TO_ORIGINAL) #define PFOR_TO_BISECTION 1 #endif #define PFOR2_EXPERIMENTAL #include <tp_parsec.h> #endif // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. "P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// 
////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv( fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet ) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log( sptprice / strike ); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF( d1 ); NofXd2 = CNDF( d2 ); FutureValueX = strike * ( exp( -(rate)*(time) ) ); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); #pragma ivdep for (int i=begin; i!=end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-5 ){ fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j=0; j<NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 0; } #else //ENABLE_TBB #ifdef ENABLE_TASK #if 0 /* ivdep inside a lambda closure seems not to be effective, icc failed to auto-vectorize with reasons of anti-dependences & flow dependences between iterations which should have been ignored by the indication of ivdep. 
*/ void bs_thread(void *tid_ptr) { int tid = *(int *)tid_ptr; int start = 0; int end = numOptions; const int GRAIN_SIZE = 1024; for(int j=0; j<NUM_RUNS; j++) { pfor(start, end, 1, GRAIN_SIZE, [](int first, int last) { cilk_begin; #pragma ivdep for (int i = first; i < last;i++) { fptype price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ) { printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } cilk_void_return; }); } } #endif /* by moving the lambda closure to a separate function, ivdep indication now seems to be effective. icc succeeded in auto-vectorization without concerns of anti-dependences & flow dependences anymore. Notice: gcc still failed to auto-vectorize even with these ivdep indications. */ void pfor_func(int first, int last) { cilk_begin; #pragma ivdep for (int i = first; i < last;i++) { fptype price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ) { printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } cilk_void_return; } void bs_thread(void *tid_ptr) { int tid = *(int *)tid_ptr; int start = 0; int end = numOptions; const int GRAIN_SIZE = 10000; for(int j=0; j<NUM_RUNS; j++) { pfor(start, end, 1, GRAIN_SIZE, pfor_func); } } /* void bs_thread_void(void *tid_ptr) { task_begin; int j; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { mk_task_group; create_task0(spawn bs_task(start, start + (end-start)/2)); call_task(spawn bs_task(start + (end-start)/2, end)); wait_tasks; } task_void_return; } int bs_thread(void *tid_ptr) { bs_thread_void(tid_ptr); return 0; } */ #else //ENABLE_TASK #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) #pragma ivdep for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP #pragma ivdep for (i=start; i<end; i++) { #endif //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } return 0; } #endif //ENABLE_TASK #endif //ENABLE_TBB int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version " __PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif #if defined(ENABLE_TASK) tp_init(); #endif if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = atoi(argv[1]); char *inputFile = argv[2]; char *outputFile = argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(nThreads > numOptions) { printf("WARNING: Not enough work, reducing number of threads to match number of options.\n"); nThreads = numOptions; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) &&!defined(ENABLE_TASK) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif // alloc spaces for the option data data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", int(numOptions * (sizeof(OptionData) + sizeof(int)))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 HANDLE *threads; int *nums; threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE)); nums = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); free(tids); #endif //WIN32 #else //ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid=0; bs_thread(&tid); #else //ENABLE_TBB #ifdef ENABLE_TASK //serial version int tid=0; pragma_omp_parallel_single(nowait, { bs_thread(&tid); }); #else //serial version int tid=0; bs_thread(&tid); #endif // ENABLE_TASK #endif //ENABLE_TBB #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = fopen(outputFile, "w"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
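
BlkSchlsEqEuroNoDiv() above prices a call for otype == 0 and otherwise derives the put from the complementary CDF values NegNofXd1/NegNofXd2. A small independent cross-check, assuming erf() for the normal CDF in place of the benchmark's polynomial CNDF, that also verifies put-call parity C - P = S - K*exp(-rT) (compile with -lm):

#include <math.h>
#include <stdio.h>

static double N(double x)             /* standard normal CDF via erf */
{
    return 0.5 * (1.0 + erf(x / sqrt(2.0)));
}

static double bs(double S, double K, double r,
                 double v, double T, int put)
{
    double d1 = (log(S / K) + (r + 0.5 * v * v) * T) / (v * sqrt(T));
    double d2 = d1 - v * sqrt(T);
    if (put)
        return K * exp(-r * T) * N(-d2) - S * N(-d1);
    return S * N(d1) - K * exp(-r * T) * N(d2);
}

int main(void)
{
    /* Hull's textbook example: S=42, K=40, r=10%, sigma=20%, T=6 months */
    double S = 42, K = 40, r = 0.1, v = 0.2, T = 0.5;
    double c = bs(S, K, r, v, T, 0);  /* approx. 4.76 */
    double p = bs(S, K, r, v, T, 1);  /* approx. 0.81 */

    printf("call %.6f put %.6f parity gap %.2e\n",
           c, p, (c - p) - (S - K * exp(-r * T)));
    return 0;
}

Because erf() is accurate to machine precision, any disagreement with the benchmark's CNDF output beyond roughly 1e-7 points at the polynomial approximation, whose absolute error is about 7.5e-8.
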
expected_output.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> //--------------------------------------------------------------------- // program MG //--------------------------------------------------------------------- //---------- // Class S: //---------- //---------- // Class W: //---------- //---------- // Class A: //---------- //---------- // Class B: //---------- //---------- // Class C: //---------- //---------- // Class D: //---------- //---------- // Class E: //---------- struct anon_NAS_MG_c_132 { double real; double imag; }; typedef struct anon_NAS_MG_c_132 dcomplex; // actual dimension including ghost cells for communications // size of rhs array // size of residual array // maximum number of levels //--------------------------------------------------------------------- /*common /mg3/*/ int nx[9]; int ny[9]; int nz[9]; /*common /ClassType/*/ char Class; /*common /my_debug/*/ int debug_vec[8]; /*common /fap/*/ int m1[9]; int m2[9]; int m3[9]; int ir[9]; int lt; int lb; //--------------------------------------------------------------------- // Set at m=1024, can handle cases up to 1024^3 case //--------------------------------------------------------------------- /*common /timers/*/ //-------------------------------------------------------------------------c // These arrays are in common because they are quite large // and probably shouldn't be allocated on the stack. They // are always passed as subroutine args. //-------------------------------------------------------------------------c /*commcon /noautom/*/ double u[2530976]; double v[2530976]; double r[2530976]; /*common /grid/*/ int is1; int is2; int is3; int ie1; int ie2; int ie3; void setup(int *n1, int *n2, int *n3); void mg3P(double u[], double v[], double r[], double a[4], double c[4], int n1, int n2, int n3); void psinv(void *or, void *ou, int n1, int n2, int n3, double c[4], int k); void resid(void *ou, void *ov, void *or, int n1, int n2, int n3, double a[4], int k); void rprj3(void *or, int m1k, int m2k, int m3k, void *os, int m1j, int m2j, int m3j, int k); void interp(void *oz, int mm1, int mm2, int mm3, void *ou, int n1, int n2, int n3, int k); void norm2u3(void *or, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz); void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk); void comm3(void *ou, int n1, int n2, int n3, int kk); void zran3(void *oz, int n1, int n2, int n3, int nx, int ny, int k); void showall(void *oz, int n1, int n2, int n3); double power(double a, int n); void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2], int m, int ind); void zero3(void *oz, int n1, int n2, int n3); double randlc(double *x, double a); void vranlc(int n, double *x, double a, double y[]); void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified); double start[64]; double elapsed[64]; double elapsed_time(); void timer_clear(int n); void timer_start(int n); void timer_stop(int n); double timer_read(int n); void wtime(double *t); int main() { //-------------------------------------------------------------------------c // k is the current level. It is passed down through subroutine args // and is NOT global. 
it is the current iteration //-------------------------------------------------------------------------c int k, it; double t, tinit, mflops; double a[4]; double c[4]; double rnm2, rnmu, old2, oldu, epsilon; int n1, n2, n3, nit; double nn, verify_value, err; int verified; int i; char *t_names[10]; double tmax; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(i = 0; i < 10; i++) { timer_clear(i); } timer_start(0); printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - MG Benchmark\n\n"); printf(" No input file. Using compiled defaults \n"); lt = 7; nit = 4; nx[lt] = 128; ny[lt] = 128; nz[lt] = 128; /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(i = 0; i <= 7; i++) { debug_vec[i] = 0; } if((nx[lt] != ny[lt]) || (nx[lt] != nz[lt])) { Class = 'U'; } else if(nx[lt] == 32 && nit == 4) { Class = 'S'; } else if(nx[lt] == 128 && nit == 4) { Class = 'W'; } else if(nx[lt] == 256 && nit == 4) { Class = 'A'; } else if(nx[lt] == 256 && nit == 20) { Class = 'B'; } else if(nx[lt] == 512 && nit == 20) { Class = 'C'; } else if(nx[lt] == 1024 && nit == 50) { Class = 'D'; } else if(nx[lt] == 2048 && nit == 50) { Class = 'E'; } else { Class = 'U'; } //--------------------------------------------------------------------- // Use these for debug info: //--------------------------------------------------------------------- // debug_vec(0) = 1 !=> report all norms // debug_vec(1) = 1 !=> some setup information // debug_vec(1) = 2 !=> more setup information // debug_vec(2) = k => at level k or below, show result of resid // debug_vec(3) = k => at level k or below, show result of psinv // debug_vec(4) = k => at level k or below, show result of rprj // debug_vec(5) = k => at level k or below, show result of interp // debug_vec(6) = 1 => (unused) // debug_vec(7) = 1 => (unused) //--------------------------------------------------------------------- a[0] = -8.0 / 3.0; a[1] = 0.0; a[2] = 1.0 / 6.0; a[3] = 1.0 / 12.0; if(Class == 'A' || Class == 'S' || Class == 'W') { //--------------------------------------------------------------------- // Coefficients for the S(a) smoother //--------------------------------------------------------------------- c[0] = -3.0 / 8.0; c[1] = +1.0 / 32.0; c[2] = -1.0 / 64.0; c[3] = 0.0; } else { //--------------------------------------------------------------------- // Coefficients for the S(b) smoother //--------------------------------------------------------------------- c[0] = -3.0 / 17.0; c[1] = +1.0 / 33.0; c[2] = -1.0 / 61.0; c[3] = 0.0; } lb = 1; k = lt; setup(&n1, &n2, &n3); zero3(u, n1, n2, n3); zran3(v, n1, n2, n3, nx[lt], ny[lt], k); norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]); printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class); printf(" Iterations: %3d\n", nit); printf("\n"); resid(u, v, r, n1, n2, n3, a, k); norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]); old2 = rnm2; oldu = rnmu; //--------------------------------------------------------------------- // One iteration for startup //--------------------------------------------------------------------- mg3P(u, v, r, a, c, n1, n2, n3); resid(u, v, r, n1, n2, n3, a, k); setup(&n1, &n2, &n3); zero3(u, n1, n2, n3); zran3(v, n1, n2, n3, nx[lt], ny[lt], k); timer_stop(0); tinit = timer_read(0); printf(" Initialization time: %15.3f seconds\n\n", tinit); /*************** Clava msgError ************** Loop Iteration number is too low 
****************************************/ for(i = 1; i < 10; i++) { timer_clear(i); } timer_start(1); resid(u, v, r, n1, n2, n3, a, k); norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]); old2 = rnm2; oldu = rnmu; /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : printf#283{printf(" iter %3d\n", it)} mg3P#285{mg3P(u, v, r, a, c, n1, n2, n3)} resid#286{resid(u, v, r, n1, n2, n3, a, k)} ****************************************/ for(it = 1; it <= nit; it++) { if((it == 1) || (it == nit) || ((it % 5) == 0)) { printf(" iter %3d\n", it); } mg3P(u, v, r, a, c, n1, n2, n3); resid(u, v, r, n1, n2, n3, a, k); } norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]); timer_stop(1); t = timer_read(1); verified = 0; verify_value = 0.0; printf("\n Benchmark completed\n"); epsilon = 1.0e-8; if(Class != 'U') { if(Class == 'S') { verify_value = 0.5307707005734e-04; } else if(Class == 'W') { verify_value = 0.6467329375339e-05; } else if(Class == 'A') { verify_value = 0.2433365309069e-05; } else if(Class == 'B') { verify_value = 0.1800564401355e-05; } else if(Class == 'C') { verify_value = 0.5706732285740e-06; } else if(Class == 'D') { verify_value = 0.1583275060440e-09; } else if(Class == 'E') { verify_value = 0.8157592357404e-10; } err = fabs(rnm2 - verify_value) / verify_value; // err = fabs( rnm2 - verify_value ); if(err <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" L2 Norm is %20.13E\n", rnm2); printf(" Error is %20.13E\n", err); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" L2 Norm is %20.13E\n", rnm2); printf(" The correct L2 Norm is %20.13E\n", verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); printf(" L2 Norm is %20.13E\n", rnm2); } nn = 1.0 * nx[lt] * ny[lt] * nz[lt]; if(t != 0.0) { mflops = 58.0 * nit * nn * 1.0e-6 / t; } else { mflops = 0.0; } print_results("MG", Class, nx[lt], ny[lt], nz[lt], nit, t, mflops, " floating point", verified); int exitValue = verified ? 
0 : 1; return exitValue; } void setup(int *n1, int *n2, int *n3) { int k, j; int ax; int mi[9][3]; int ng[9][3]; ng[lt][0] = nx[lt]; ng[lt][1] = ny[lt]; ng[lt][2] = nz[lt]; /*************** Clava msgError ************** unsolved dependency for arrayAccess ng use : RW ****************************************/ for(k = lt - 1; k >= 1; k--) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(ax = 0; ax < 3; ax++) { ng[k][ax] = ng[k + 1][ax] / 2; } } #pragma omp parallel for default(shared) private(k) firstprivate(lt, ng) for(k = lt; k >= 1; k--) { nx[k] = ng[k][0]; ny[k] = ng[k][1]; nz[k] = ng[k][2]; } #pragma omp parallel for default(shared) private(k, ax) firstprivate(lt, ng) for(k = lt; k >= 1; k--) { /*************** Clava msgError ************** Loop Iteration number is too low ****************************************/ for(ax = 0; ax < 3; ax++) { mi[k][ax] = 2 + ng[k][ax]; } m1[k] = mi[k][0]; m2[k] = mi[k][1]; m3[k] = mi[k][2]; } k = lt; is1 = 2 + ng[k][0] - ng[lt][0]; ie1 = 1 + ng[k][0]; *n1 = 3 + ie1 - is1; is2 = 2 + ng[k][1] - ng[lt][1]; ie2 = 1 + ng[k][1]; *n2 = 3 + ie2 - is2; is3 = 2 + ng[k][2] - ng[lt][2]; ie3 = 1 + ng[k][2]; *n3 = 3 + ie3 - is3; ir[lt] = 0; /*************** Clava msgError ************** unsolved dependency for arrayAccess ir use : RW ****************************************/ for(j = lt - 1; j >= 1; j--) { ir[j] = ir[j + 1] + 1 * m1[j + 1] * m2[j + 1] * m3[j + 1]; } if(debug_vec[1] >= 1) { printf(" in setup, \n"); printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n"); printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n", k, lt, ng[k][0], ng[k][1], ng[k][2], *n1, *n2, *n3, is1, is2, is3, ie1, ie2, ie3); } } //--------------------------------------------------------------------- // multigrid V-cycle routine //--------------------------------------------------------------------- void mg3P(double u[], double v[], double r[], double a[4], double c[4], int n1, int n2, int n3) { int j, k; //--------------------------------------------------------------------- // down cycle. 
// restrict the residual from the find grid to the coarse //--------------------------------------------------------------------- /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : rprj3#435{rprj3(&r[ir[k]], m1[k], m2[k], m3[k], &r[ir[k - 1]], m1[k - 1], m2[k - 1], m3[k - 1], k)} ****************************************/ for(k = lt; k >= lb + 1; k--) { j = k - 1; rprj3(&r[ir[k]], m1[k], m2[k], m3[k], &r[ir[j]], m1[j], m2[j], m3[j], k); } k = lb; //--------------------------------------------------------------------- // compute an approximate solution on the coarsest grid //--------------------------------------------------------------------- zero3(&u[ir[k]], m1[k], m2[k], m3[k]); psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k); /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : interp#451{interp(&u[ir[k - 1]], m1[k - 1], m2[k - 1], m3[k - 1], &u[ir[k]], m1[k], m2[k], m3[k], k)} resid#455{resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k)} psinv#459{psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k)} ****************************************/ for(k = lb + 1; k <= lt - 1; k++) { j = k - 1; //--------------------------------------------------------------------- // prolongate from level k-1 to k //--------------------------------------------------------------------- zero3(&u[ir[k]], m1[k], m2[k], m3[k]); interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k); //--------------------------------------------------------------------- // compute residual for level k //--------------------------------------------------------------------- resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k); //--------------------------------------------------------------------- // apply smoother //--------------------------------------------------------------------- psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k); } j = lt - 1; k = lt; interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k); resid(u, v, r, n1, n2, n3, a, k); psinv(r, u, n1, n2, n3, c, k); } //--------------------------------------------------------------------- // psinv applies an approximate inverse as smoother: u = u + Cr // // This implementation costs 15A + 4M per result, where // A and M denote the costs of Addition and Multiplication. // Presuming coefficient c(3) is zero (the NPB assumes this, // but it is thus not a general case), 2A + 1M may be eliminated, // resulting in 13A + 3M. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- void psinv(void *or, void *ou, int n1, int n2, int n3, double c[4], int k) { double (*r)[n2][n1] = (double (*)[n2][n1]) or; double (*u)[n2][n1] = (double (*)[n2][n1]) ou; int i3, i2, i1; double r1[131]; double r2[131]; #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1, r, c, r1, r2) for(i3 = 1; i3 < n3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, i3, r, c, r1, r2) for(i2 = 1; i2 < n2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i2, i3, r) for(i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2 - 1][i1] + r[i3][i2 + 1][i1] + r[i3 - 1][i2][i1] + r[i3 + 1][i2][i1]; r2[i1] = r[i3 - 1][i2 - 1][i1] + r[i3 - 1][i2 + 1][i1] + r[i3 + 1][i2 - 1][i1] + r[i3 + 1][i2 + 1][i1]; } // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i3, i2, r, r1, r2, c) for(i1 = 1; i1 < n1 - 1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * (r[i3][i2][i1 - 1] + r[i3][i2][i1 + 1] + r1[i1]) + c[2] * (r2[i1] + r1[i1 - 1] + r1[i1 + 1]); //-------------------------------------------------------------------- // Assume c[3] = 0 (Enable line below if c[3] not= 0) //-------------------------------------------------------------------- // + c[3] * ( r2[i1-1] + r2[i1+1] ) //-------------------------------------------------------------------- } } } //--------------------------------------------------------------------- // exchange boundary points //--------------------------------------------------------------------- comm3(u, n1, n2, n3, k); if(debug_vec[0] >= 1) { rep_nrm(u, n1, n2, n3, " psinv", k); } if(debug_vec[3] >= k) { showall(u, n1, n2, n3); } } //--------------------------------------------------------------------- // resid computes the residual: r = v - Au // // This implementation costs 15A + 4M per result, where // A and M denote the costs of Addition (or Subtraction) and // Multiplication, respectively. // Presuming coefficient a(1) is zero (the NPB assumes this, // but it is thus not a general case), 3A + 1M may be eliminated, // resulting in 12A + 3M. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- void resid(void *ou, void *ov, void *or, int n1, int n2, int n3, double a[4], int k) { double (*u)[n2][n1] = (double (*)[n2][n1]) ou; double (*v)[n2][n1] = (double (*)[n2][n1]) ov; double (*r)[n2][n1] = (double (*)[n2][n1]) or; int i3, i2, i1; double u1[131]; double u2[131]; #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1, u, a, v, u1, u2) for(i3 = 1; i3 < n3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, i3, u, a, v, u1, u2) for(i2 = 1; i2 < n2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i2, i3, u) for(i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2 - 1][i1] + u[i3][i2 + 1][i1] + u[i3 - 1][i2][i1] + u[i3 + 1][i2][i1]; u2[i1] = u[i3 - 1][i2 - 1][i1] + u[i3 - 1][i2 + 1][i1] + u[i3 + 1][i2 - 1][i1] + u[i3 + 1][i2 + 1][i1]; } // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i3, i2, u2, u1, a, u, v) for(i1 = 1; i1 < n1 - 1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] - a[2] * (u2[i1] + u1[i1 - 1] + u1[i1 + 1]) - a[3] * (u2[i1 - 1] + u2[i1 + 1]); //------------------------------------------------------------------- // Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0) //------------------------------------------------------------------- // - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1] // + u1[i1] ) //------------------------------------------------------------------- } } } //--------------------------------------------------------------------- // exchange boundary data //--------------------------------------------------------------------- comm3(r, n1, n2, n3, k); if(debug_vec[0] >= 1) { rep_nrm(r, n1, n2, n3, " resid", k); } if(debug_vec[2] >= k) { showall(r, n1, n2, n3); } } //--------------------------------------------------------------------- // rprj3 projects onto the next coarser grid, // using a trilinear Finite Element projection: s = r' = P r // // This implementation costs 20A + 4M per result, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- void rprj3(void *or, int m1k, int m2k, int m3k, void *os, int m1j, int m2j, int m3j, int k) { double (*r)[m2k][m1k] = (double (*)[m2k][m1k]) or; double (*s)[m2j][m1j] = (double (*)[m2j][m1j]) os; int j3, j2, j1, i3, i2, i1, d1, d2, d3, j; double x1[131]; double y1[131]; double x2; double y2; if(m1k == 3) { d1 = 2; } else { d1 = 1; } if(m2k == 3) { d2 = 2; } else { d2 = 1; } if(m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel for default(shared) private(j3, j2, j1, i3, i2, i1, y2, x2) firstprivate(m3j, d3, m2j, d2, m1j, d1, r, x1, y1) for(j3 = 1; j3 < m3j - 1; j3++) { i3 = 2 * j3 - d3; // #pragma omp parallel for default(shared) private(j2, j1, i2, i1, y2, x2) firstprivate(m2j, d2, m1j, d1, i3, j3, d3, r, x1, y1) for(j2 = 1; j2 < m2j - 1; j2++) { i2 = 2 * j2 - d2; // #pragma omp parallel for default(shared) private(j1, i1) firstprivate(m1j, d1, j2, i3, d2, i2, j3, d3, r) for(j1 = 1; j1 < m1j; j1++) { i1 = 2 * j1 - d1; x1[i1] = r[i3 + 1][i2][i1] + r[i3 + 1][i2 + 2][i1] + r[i3][i2 + 1][i1] + r[i3 + 2][i2 + 1][i1]; y1[i1] = r[i3][i2][i1] + r[i3 + 2][i2][i1] + r[i3][i2 + 2][i1] + r[i3 + 2][i2 + 2][i1]; } // #pragma omp parallel for default(shared) private(j1, i1, y2, x2) firstprivate(m1j, d1, j3, j2, d3, d2, i3, i2, r, x1, y1) for(j1 = 1; j1 < m1j - 1; j1++) { i1 = 2 * j1 - d1; y2 = r[i3][i2][i1 + 1] + r[i3 + 2][i2][i1 + 1] + r[i3][i2 + 2][i1 + 1] + r[i3 + 2][i2 + 2][i1 + 1]; x2 = r[i3 + 1][i2][i1 + 1] + r[i3 + 1][i2 + 2][i1 + 1] + r[i3][i2 + 1][i1 + 1] + r[i3 + 2][i2 + 1][i1 + 1]; s[j3][j2][j1] = 0.5 * r[i3 + 1][i2 + 1][i1 + 1] + 0.25 * (r[i3 + 1][i2 + 1][i1] + r[i3 + 1][i2 + 1][i1 + 2] + x2) + 0.125 * (x1[i1] + x1[i1 + 2] + y2) + 0.0625 * (y1[i1] + y1[i1 + 2]); } } } j = k - 1; comm3(s, m1j, m2j, m3j, j); if(debug_vec[0] >= 1) { rep_nrm(s, m1j, m2j, m3j, " rprj3", k - 1); } if(debug_vec[4] >= k) { showall(s, m1j, m2j, m3j); } } //--------------------------------------------------------------------- // interp adds the trilinear interpolation of the correction // from the coarser grid to the current approximation: u = u + Qu' // // Observe that this implementation costs 16A + 4M, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. Vector machines may get slightly better // performance however, with 8 separate "do i1" loops, rather than 4. 
//--------------------------------------------------------------------- void interp(void *oz, int mm1, int mm2, int mm3, void *ou, int n1, int n2, int n3, int k) { double (*z)[mm2][mm1] = (double (*)[mm2][mm1]) oz; double (*u)[n2][n1] = (double (*)[n2][n1]) ou; int i3, i2, i1, d1, d2, d3, t1, t2, t3; // note that m = 1037 in globals.h but for this only need to be // 535 to handle up to 1024^3 // integer m // parameter( m=535 ) double z1[131]; double z2[131]; double z3[131]; if(n1 != 3 && n2 != 3 && n3 != 3) { #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(mm3, mm2, mm1, z, z1, z2, z3) for(i3 = 0; i3 < mm3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(mm2, mm1, i3, z, z1, z2, z3) for(i2 = 0; i2 < mm2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i2, i3, z) for(i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2 + 1][i1] + z[i3][i2][i1]; z2[i1] = z[i3 + 1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3 + 1][i2 + 1][i1] + z[i3 + 1][i2][i1] + z1[i1]; } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i3, i2, z) for(i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2][2 * i1] = u[2 * i3][2 * i2][2 * i1] + z[i3][i2][i1]; u[2 * i3][2 * i2][2 * i1 + 1] = u[2 * i3][2 * i2][2 * i1 + 1] + 0.5 * (z[i3][i2][i1 + 1] + z[i3][i2][i1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i2, i3, z1) for(i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2 + 1][2 * i1] = u[2 * i3][2 * i2 + 1][2 * i1] + 0.5 * z1[i1]; u[2 * i3][2 * i2 + 1][2 * i1 + 1] = u[2 * i3][2 * i2 + 1][2 * i1 + 1] + 0.25 * (z1[i1] + z1[i1 + 1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i3, i2, z2) for(i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2][2 * i1] = u[2 * i3 + 1][2 * i2][2 * i1] + 0.5 * z2[i1]; u[2 * i3 + 1][2 * i2][2 * i1 + 1] = u[2 * i3 + 1][2 * i2][2 * i1 + 1] + 0.25 * (z2[i1] + z2[i1 + 1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i3, i2, z3) for(i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2 + 1][2 * i1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1] + 0.25 * z3[i1]; u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] + 0.125 * (z3[i1] + z3[i1 + 1]); } } } } else { if(n1 == 3) { d1 = 2; t1 = 1; } else { d1 = 1; t1 = 0; } if(n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if(n3 == 3) { d3 = 2; t3 = 1; } else { d3 = 1; t3 = 0; } #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(d3, mm3, d2, mm2, d1, mm1, t1, t2, z) for(i3 = d3; i3 <= mm3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(d2, mm2, d1, mm1, i3, d3, t1, z) for(i2 = d2; i2 <= mm2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(d1, mm1, i3, i2, d3, d2, z) for(i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]; } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i3, i2, d3, d2, t1, z) for(i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.5 * (z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(mm2, d1, mm1, i3, d3, t2, t1, z) for(i2 = 1; i2 <= mm2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(d1, mm1, i3, 
i2, d3, t2, z) for(i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i3, i2, d3, t2, t1, z) for(i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3 - 1][i2][i1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(mm3, d2, mm2, d1, mm1, t3, t1, t2, z) for(i3 = 1; i3 <= mm3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(d2, mm2, d1, mm1, i3, t3, t1, z) for(i2 = d2; i2 <= mm2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(d1, mm1, i2, i3, t3, d2, z) for(i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i2, i3, t3, d2, t1, z) for(i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3][i2 - 1][i1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(mm2, d1, mm1, i3, t3, t2, t1, z) for(i2 = 1; i2 <= mm2 - 1; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(d1, mm1, i2, i3, t3, t2, z) for(i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.25 * (z[i3][i2][i1 - 1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } // #pragma omp parallel for default(shared) private(i1) firstprivate(mm1, i2, i3, t3, t2, t1, z) for(i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.125 * (z[i3][i2][i1] + z[i3][i2 - 1][i1] + z[i3][i2][i1 - 1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2][i1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } } if(debug_vec[0] >= 1) { rep_nrm(z, mm1, mm2, mm3, "z: inter", k - 1); rep_nrm(u, n1, n2, n3, "u: inter", k); } if(debug_vec[5] >= k) { showall(z, mm1, mm2, mm3); showall(u, n1, n2, n3); } } //--------------------------------------------------------------------- // norm2u3 evaluates approximations to the L2 norm and the // uniform (or L-infinity or Chebyshev) norm, under the // assumption that the boundaries are periodic or zero. Add the // boundaries in with half weight (quarter weight on the edges // and eighth weight at the corners) for inhomogeneous boundaries. 
//--------------------------------------------------------------------- void norm2u3(void *or, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz) { double (*r)[n2][n1] = (double (*)[n2][n1]) or; double s, a; int i3, i2, i1; double dn; dn = 1.0 * nx * ny * nz; s = 0.0; *rnmu = 0.0; /*************** Clava msgError ************** Variable rnmu could not be categorized into any OpenMP Variable Scopeuse : RW ****************************************/ for(i3 = 1; i3 < n3 - 1; i3++) { /*************** Clava msgError ************** Variable rnmu could not be categorized into any OpenMP Variable Scopeuse : RW ****************************************/ for(i2 = 1; i2 < n2 - 1; i2++) { /*************** Clava msgError ************** Variable rnmu could not be categorized into any OpenMP Variable Scopeuse : RW ****************************************/ for(i1 = 1; i1 < n1 - 1; i1++) { s = s + pow(r[i3][i2][i1], 2.0); a = fabs(r[i3][i2][i1]); if(a > *rnmu) *rnmu = a; } } } *rnm2 = sqrt(s / dn); } //--------------------------------------------------------------------- // report on norm //--------------------------------------------------------------------- void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk) { double rnm2, rnmu; norm2u3(u, n1, n2, n3, &rnm2, &rnmu, nx[kk], ny[kk], nz[kk]); printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, rnm2, rnmu); } //--------------------------------------------------------------------- // comm3 organizes the communication on all borders //--------------------------------------------------------------------- void comm3(void *ou, int n1, int n2, int n3, int kk) { double (*u)[n2][n1] = (double (*)[n2][n1]) ou; int i1, i2, i3; #pragma omp parallel for default(shared) private(i3, i2) firstprivate(n3, n2, n1) for(i3 = 1; i3 < n3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i2) firstprivate(n2, n1, i3) for(i2 = 1; i2 < n2 - 1; i2++) { u[i3][i2][0] = u[i3][i2][n1 - 2]; u[i3][i2][n1 - 1] = u[i3][i2][1]; } } #pragma omp parallel for default(shared) private(i3, i1) firstprivate(n3, n1, n2) for(i3 = 1; i3 < n3 - 1; i3++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, n2, i3) for(i1 = 0; i1 < n1; i1++) { u[i3][0][i1] = u[i3][n2 - 2][i1]; u[i3][n2 - 1][i1] = u[i3][1][i1]; } } #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, n3) for(i2 = 0; i2 < n2; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, n3, i2) for(i1 = 0; i1 < n1; i1++) { u[0][i2][i1] = u[n3 - 2][i2][i1]; u[n3 - 1][i2][i1] = u[1][i2][i1]; } } } //--------------------------------------------------------------------- // zran3 loads +1 at ten randomly chosen points, // loads -1 at a different ten random points, // and zero elsewhere. 
//--------------------------------------------------------------------- void zran3(void *oz, int n1, int n2, int n3, int nx, int ny, int k) { double (*z)[n2][n1] = (double (*)[n2][n1]) oz; int i0, m0, m1; int i1, i2, i3, d1, e1, e2, e3; double xx, x0, x1, a1, a2, ai; int const mm = 10; double const a = pow(5.0, 13.0); double const x = 314159265.0; double ten[mm][2]; double best; int i; int j1[mm][2]; int j2[mm][2]; int j3[mm][2]; int jg[4][mm][2]; double rdummy; a1 = power(a, nx); a2 = power(a, nx * ny); zero3(z, n1, n2, n3); i = is1 - 2 + nx * (is2 - 2 + ny * (is3 - 2)); ai = power(a, i); d1 = ie1 - is1 + 1; e1 = ie1 - is1 + 2; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = x; rdummy = randlc(&x0, ai); /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : vranlc#931{vranlc(d1, &xx, a, &(z[i3][i2][1]))} ****************************************/ for(i3 = 1; i3 < e3; i3++) { x1 = x0; /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : vranlc#931{vranlc(d1, &xx, a, &(z[i3][i2][1]))} ****************************************/ for(i2 = 1; i2 < e2; i2++) { xx = x1; vranlc(d1, &xx, a, &(z[i3][i2][1])); rdummy = randlc(&x1, a1); } rdummy = randlc(&x0, a2); } //--------------------------------------------------------------------- // comm3(z,n1,n2,n3); // showall(z,n1,n2,n3); //--------------------------------------------------------------------- //--------------------------------------------------------------------- // each processor looks for twenty candidates //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(i) firstprivate(mm) for(i = 0; i < mm; i++) { ten[i][1] = 0.0; j1[i][1] = 0; j2[i][1] = 0; j3[i][1] = 0; ten[i][0] = 1.0; j1[i][0] = 0; j2[i][0] = 0; j3[i][0] = 0; } /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : bubble#1011{bubble(ten, j1, j2, j3, mm, 1)} bubble#1018{bubble(ten, j1, j2, j3, mm, 0)} ****************************************/ for(i3 = 1; i3 < n3 - 1; i3++) { /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : bubble#1011{bubble(ten, j1, j2, j3, mm, 1)} bubble#1018{bubble(ten, j1, j2, j3, mm, 0)} ****************************************/ for(i2 = 1; i2 < n2 - 1; i2++) { /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : bubble#1011{bubble(ten, j1, j2, j3, mm, 1)} bubble#1018{bubble(ten, j1, j2, j3, mm, 0)} ****************************************/ for(i1 = 1; i1 < n1 - 1; i1++) { if(z[i3][i2][i1] > ten[0][1]) { ten[0][1] = z[i3][i2][i1]; j1[0][1] = i1; j2[0][1] = i2; j3[0][1] = i3; bubble(ten, j1, j2, j3, mm, 1); } if(z[i3][i2][i1] < ten[0][0]) { ten[0][0] = z[i3][i2][i1]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble(ten, j1, j2, j3, mm, 0); } } } } //--------------------------------------------------------------------- // Now which of these are globally best? 
//--------------------------------------------------------------------- i1 = mm - 1; i0 = mm - 1; /*************** Clava msgError ************** Variable i1 could not be categorized into any OpenMP Variable Scopeuse : RW Variable i0 could not be categorized into any OpenMP Variable Scopeuse : RW ****************************************/ for(i = mm - 1; i >= 0; i--) { best = 0.0; if(best < ten[i1][1]) { jg[0][i][1] = 0; jg[1][i][1] = is1 - 2 + j1[i1][1]; jg[2][i][1] = is2 - 2 + j2[i1][1]; jg[3][i][1] = is3 - 2 + j3[i1][1]; i1 = i1 - 1; } else { jg[0][i][1] = 0; jg[1][i][1] = 0; jg[2][i][1] = 0; jg[3][i][1] = 0; } best = 1.0; if(best > ten[i0][0]) { jg[0][i][0] = 0; jg[1][i][0] = is1 - 2 + j1[i0][0]; jg[2][i][0] = is2 - 2 + j2[i0][0]; jg[3][i][0] = is3 - 2 + j3[i0][0]; i0 = i0 - 1; } else { jg[0][i][0] = 0; jg[1][i][0] = 0; jg[2][i][0] = 0; jg[3][i][0] = 0; } } // m1 = i1+1; // m0 = i0+1; m1 = 0; m0 = 0; /* int cnt = 0; printf(" \n"); printf(" negative charges at\n"); for (i = 0; i < mm; i++) { printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]); if (++cnt % 5 == 0) printf("\n"); } cnt = 0; printf(" positive charges at\n"); for (i = 0; i < mm; i++) { printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]); if (++cnt % 5 == 0) printf("\n"); } cnt = 0; printf(" small random numbers were\n"); for (i = mm-1; i >= 0; i--) { printf(" %15.8E", ten[i][0]); if (++cnt % 5 == 0) printf("\n"); } cnt = 0; printf(" and they were found on processor number\n"); for (i = mm-1; i >= 0; i--) { printf(" %4d", jg[0][i][0]); if (++cnt % 10 == 0) printf("\n"); } cnt = 0; printf(" large random numbers were\n"); for (i = mm-1; i >= 0; i--) { printf(" %15.8E", ten[i][1]); if (++cnt % 5 == 0) printf("\n"); } cnt = 0; printf(" and they were found on processor number\n"); for (i = mm-1; i >= 0; i--) { printf(" %4d", jg[0][i][1]); if (++cnt % 10 == 0) printf("\n"); } */ #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1) for(i3 = 0; i3 < n3; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, i3) for(i2 = 0; i2 < n2; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i3, i2) for(i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } /*************** Clava msgError ************** Array access z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] which is used for writing has subscript of arrayType jg[3][i][0] ****************************************/ for(i = mm - 1; i >= m0; i--) { z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0; } /*************** Clava msgError ************** Array access z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] which is used for writing has subscript of arrayType jg[3][i][1] ****************************************/ for(i = mm - 1; i >= m1; i--) { z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0; } comm3(z, n1, n2, n3, k); //--------------------------------------------------------------------- // showall(z,n1,n2,n3); //--------------------------------------------------------------------- } void showall(void *oz, int n1, int n2, int n3) { double (*z)[n2][n1] = (double (*)[n2][n1]) oz; int i1, i2, i3; int m1, m2, m3; m1 = ((n1) < (18) ? (n1) : (18)); m2 = ((n2) < (14) ? (n2) : (14)); m3 = ((n3) < (18) ? 
(n3) : (18)); printf(" \n"); /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : printf#1154{printf("%6.3f", z[i3][i2][i1])} printf#1156{printf("\n")} printf#1158{printf(" - - - - - - - \n")} ****************************************/ for(i3 = 0; i3 < m3; i3++) { /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : printf#1154{printf("%6.3f", z[i3][i2][i1])} printf#1156{printf("\n")} ****************************************/ for(i1 = 0; i1 < m1; i1++) { /*************** Clava msgError ************** Variables Access as passed arguments Can not be traced inside of function calls : printf#1154{printf("%6.3f", z[i3][i2][i1])} ****************************************/ for(i2 = 0; i2 < m2; i2++) { printf("%6.3f", z[i3][i2][i1]); } printf("\n"); } printf(" - - - - - - - \n"); } printf(" \n"); } //--------------------------------------------------------------------- // power raises an integer, disguised as a double // precision real, to an integer power //--------------------------------------------------------------------- double power(double a, int n) { double aj; int nj; double rdummy; double power; power = 1.0; nj = n; aj = a; while(nj != 0) { if((nj % 2) == 1) rdummy = randlc(&power, aj); rdummy = randlc(&aj, aj); nj = nj / 2; } return power; } //--------------------------------------------------------------------- // bubble does a bubble sort in direction dir //--------------------------------------------------------------------- void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2], int m, int ind) { double temp; int i, j_temp; if(ind == 1) { /*************** Clava msgError ************** Loop contains Invalid Statement -> ReturnStmt#1212 ****************************************/ for(i = 0; i < m - 1; i++) { if(ten[i][ind] > ten[i + 1][ind]) { temp = ten[i + 1][ind]; ten[i + 1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i + 1][ind]; j1[i + 1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i + 1][ind]; j2[i + 1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i + 1][ind]; j3[i + 1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } else { /*************** Clava msgError ************** Loop contains Invalid Statement -> ReturnStmt#1236 ****************************************/ for(i = 0; i < m - 1; i++) { if(ten[i][ind] < ten[i + 1][ind]) { temp = ten[i + 1][ind]; ten[i + 1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i + 1][ind]; j1[i + 1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i + 1][ind]; j2[i + 1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i + 1][ind]; j3[i + 1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } } void zero3(void *oz, int n1, int n2, int n3) { double (*z)[n2][n1] = (double (*)[n2][n1]) oz; int i1, i2, i3; #pragma omp parallel for default(shared) private(i3, i2, i1) firstprivate(n3, n2, n1) for(i3 = 0; i3 < n3; i3++) { // #pragma omp parallel for default(shared) private(i2, i1) firstprivate(n2, n1, i3) for(i2 = 0; i2 < n2; i2++) { // #pragma omp parallel for default(shared) private(i1) firstprivate(n1, i3, i2) for(i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } } double randlc(double *x, double a) { //-------------------------------------------------------------------- // // This routine returns a uniform pseudorandom double precision number in the // range (0, 1) by using the linear congruential generator // // x_{k+1} = a x_k (mod 
2^46) // // where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers // before repeating. The argument A is the same as 'a' in the above formula, // and X is the same as x_0. A and X must be odd double precision integers // in the range (1, 2^46). The returned value RANDLC is normalized to be // between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain // the new seed x_1, so that subsequent calls to RANDLC using the same // arguments will generate a continuous sequence. // // This routine should produce the same results on any computer with at least // 48 mantissa bits in double precision floating point data. On 64 bit // systems, double precision should be disabled. // // David H. Bailey October 26, 1990 // //-------------------------------------------------------------------- // r23 = pow(0.5, 23.0); //// pow(0.5, 23.0) = 1.1920928955078125e-07 // r46 = r23 * r23; // t23 = pow(2.0, 23.0); //// pow(2.0, 23.0) = 8.388608e+06 // t46 = t23 * t23; double const r23 = 1.1920928955078125e-07; double const r46 = r23 * r23; double const t23 = 8.388608e+06; double const t46 = t23 * t23; double t1, t2, t3, t4, a1, a2, x1, x2, z; double r; //-------------------------------------------------------------------- // Break A into two parts such that A = 2^23 * A1 + A2. //-------------------------------------------------------------------- t1 = r23 * a; a1 = (int) t1; a2 = a - t23 * a1; //-------------------------------------------------------------------- // Break X into two parts such that X = 2^23 * X1 + X2, compute // Z = A1 * X2 + A2 * X1 (mod 2^23), and then // X = 2^23 * Z + A2 * X2 (mod 2^46). //-------------------------------------------------------------------- t1 = r23 * (*x); x1 = (int) t1; x2 = *x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int) (r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int) (r46 * t3); *x = t3 - t46 * t4; r = r46 * (*x); return r; } void vranlc(int n, double *x, double a, double y[]) { //-------------------------------------------------------------------- // // This routine generates N uniform pseudorandom double precision numbers in // the range (0, 1) by using the linear congruential generator // // x_{k+1} = a x_k (mod 2^46) // // where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers // before repeating. The argument A is the same as 'a' in the above formula, // and X is the same as x_0. A and X must be odd double precision integers // in the range (1, 2^46). The N results are placed in Y and are normalized // to be between 0 and 1. X is updated to contain the new seed, so that // subsequent calls to VRANLC using the same arguments will generate a // continuous sequence. If N is zero, only initialization is performed, and // the variables X, A and Y are ignored. // // This routine is the standard version designed for scalar or RISC systems. // However, it should produce the same results on any single processor // computer with at least 48 mantissa bits in double precision floating point // data. On 64 bit systems, double precision should be disabled. 
// //-------------------------------------------------------------------- // r23 = pow(0.5, 23.0); //// pow(0.5, 23.0) = 1.1920928955078125e-07 // r46 = r23 * r23; // t23 = pow(2.0, 23.0); //// pow(2.0, 23.0) = 8.388608e+06 // t46 = t23 * t23; double const r23 = 1.1920928955078125e-07; double const r46 = r23 * r23; double const t23 = 8.388608e+06; double const t46 = t23 * t23; double t1, t2, t3, t4, a1, a2, x1, x2, z; int i; //-------------------------------------------------------------------- // Break A into two parts such that A = 2^23 * A1 + A2. //-------------------------------------------------------------------- t1 = r23 * a; a1 = (int) t1; a2 = a - t23 * a1; //-------------------------------------------------------------------- // Generate N results. This loop is not vectorizable. //-------------------------------------------------------------------- /*************** Clava msgError ************** Variable x could not be categorized into any OpenMP Variable Scopeuse : RWR ****************************************/ for(i = 0; i < n; i++) { //-------------------------------------------------------------------- // Break X into two parts such that X = 2^23 * X1 + X2, compute // Z = A1 * X2 + A2 * X1 (mod 2^23), and then // X = 2^23 * Z + A2 * X2 (mod 2^46). //-------------------------------------------------------------------- t1 = r23 * (*x); x1 = (int) t1; x2 = *x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int) (r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int) (r46 * t3); *x = t3 - t46 * t4; y[i] = r46 * (*x); } return; } void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) { char size[16]; int j; printf("\n\n %s Benchmark Completed.\n", name); printf(" Class = %12c\n", class); // If this is not a grid-based problem (EP, FT, CG), then // we only print n1, which contains some measure of the // problem size. In that case, n2 and n3 are both zero. 
// Otherwise, we print the grid size n1xn2xn3 if((n2 == 0) && (n3 == 0)) { if((name[0] == 'E') && (name[1] == 'P')) { sprintf(size, "%15.0lf", pow(2.0, n1)); j = 14; if(size[j] == '.') { size[j] = ' '; j--; } size[j + 1] = '\0'; printf(" Size = %15s\n", size); } else { printf(" Size = %12d\n", n1); } } else { printf(" Size = %4dx%4dx%4d\n", n1, n2, n3); } printf(" Iterations = %12d\n", niter); printf(" Time in seconds = %12.2lf\n", t); printf(" Mop/s total = %15.2lf\n", mops); printf(" Operation type = %24s\n", optype); if(verified) printf(" Verification = %12s\n", "SUCCESSFUL"); else printf(" Verification = %12s\n", "UNSUCCESSFUL"); } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *) 0); if(sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec; } /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time() { double t; wtime(&t); return (t); } /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear(int n) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start(int n) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop(int n) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read(int n) { return (elapsed[n]); }
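/*
 * Editor's sketch, not part of the original NPB source: a minimal check of
 * the seed-jumping property documented in the randlc()/vranlc() comments
 * above. Advancing the seed n times with vranlc() must agree with a single
 * randlc() call whose multiplier is a^n (mod 2^46) obtained from power().
 * The function name lcg_demo is illustrative only and is never called.
 */
static int lcg_demo(void)
{
    double const a = 1220703125.0;   /* 5^13, the multiplier used by zran3 */
    double x1 = 314159265.0;         /* default NPB seed */
    double x2 = 314159265.0;
    double y[8];
    vranlc(8, &x1, a, y);            /* draw 8 numbers; x1 advances 8 steps */
    (void) randlc(&x2, power(a, 8)); /* jump x2 ahead 8 steps in one call */
    return x1 == x2;                 /* exact compare: all values are
                                        integers below 2^46, representable
                                        exactly in a double */
}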
pacset_rf_regressor.h
#ifndef PACSET_RF_REG #define PACSET_RF_REG #include <vector> #include <unordered_set> #include <fstream> #include "pacset_base_model.h" #include "packer.h" #include "config.h" #include "json_reader.h" #include "utils.h" #include "node.h" #include "MemoryMapped.h" #define NUM_FILES 10 #define BLOCK_LOGGING 1 template <typename T, typename F> class PacsetRandomForestRegressor: public PacsetBaseModel<T, F> { public: inline void setMembers(const std::vector<int> &bin_sizes, const std::vector<int> &bin_node_sizes, const std::vector<std::vector<int>> &bin_start){ PacsetBaseModel<T, F>::bin_sizes.clear(); std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes)); std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes)); for (auto i: bin_start) PacsetBaseModel<T, F>::bin_start.push_back(i); } inline void setBinNodeSizes(int pos, int siz){ PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz; } inline void loadModel() { JSONReader<T, F> J; //J.convertSklToBins(PacsetBaseModel<T, F>::bins, J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins, PacsetBaseModel<T, F>::bin_sizes, PacsetBaseModel<T, F>::bin_start, PacsetBaseModel<T, F>::bin_node_sizes); } inline void pack(){ std::string layout = Config::getValue("layout"); auto bin = PacsetBaseModel<T, F>::bins[0]; int num_bins = std::stoi(Config::getValue("numthreads")); for(int i=0; i<num_bins; ++i){ Packer<T, F> packer_obj(layout); if(Config::getValue("intertwine") != std::string("notfound")) packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str())); //should pack in place packer_obj.pack(PacsetBaseModel<T, F>::bins[i], PacsetBaseModel<T, F>::bin_sizes[i], PacsetBaseModel<T, F>::bin_start[i] ); setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size()); } } inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) { int num_classes = std::stoi(Config::getValue("numclasses")); int num_threads = std::stoi(Config::getValue("numthreads")); int num_bins = PacsetBaseModel<T, F>::bin_sizes.size(); std::vector<double> elapsed_arr; std::string modelfname = Config::getValue("modelfilename"); MemoryMapped mmapped_obj(modelfname.c_str(), 0); Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData(); std::unordered_set<int> blocks_accessed; int next_node = 0; int block_offset = 0; int offset = 0; double leaf_sum = 0; std::vector<int> offsets; int curr_offset = 0; int total_num_trees = 0; for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){ offsets.push_back(curr_offset); curr_offset += val; } /* reduction keeps the shared accumulators race-free across bins */ #pragma omp parallel for num_threads(num_threads) reduction(+ : leaf_sum, total_num_trees) for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){ int block_number = 0; Node<T, F> *bin = data + offsets[bin_counter]; std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); std::vector<double> last_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); int i, feature_num=0, number_not_in_leaf=0; T feature_val; int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter]; total_num_trees += siz; for(i=0; i<siz; ++i){ curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i]; //bin[curr_node[i]].printNode(); __builtin_prefetch(&bin[curr_node[i]], 0, 3); #ifdef BLOCK_LOGGING block_number = (curr_node[i] + block_offset) / BLOCK_SIZE; #pragma omp critical blocks_accessed.insert(block_number); #endif } do{ number_not_in_leaf = 0; for( i=0; i<siz; ++i){ if(curr_node[i] > 0){ feature_num = bin[curr_node[i]].getFeature(); feature_val =
observation[feature_num]; if(bin[curr_node[i]].getLeft() == -1){ last_node[i] = bin[curr_node[i]].getThreshold(); curr_node[i] = -1; } else { curr_node[i] = bin[curr_node[i]].nextNode(feature_val); __builtin_prefetch(&bin[curr_node[i]], 0, 3); ++number_not_in_leaf; } } } }while(number_not_in_leaf); double sum=0; for(i=0; i<siz; ++i){ sum += last_node[i]; } leaf_sum +=sum; block_offset += PacsetBaseModel<T, F>::bin_node_sizes[bin_counter]; } preds.clear(); preds.push_back((double)leaf_sum); preds.push_back((double)total_num_trees); #ifdef BLOCK_LOGGING return blocks_accessed.size(); #else return 0; #endif } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<int>& preds, std::vector<int>&results, bool mmap) { } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<double>& preds, std::vector<double>&results, bool mmap) { //Predicts the class for a vector of observations //By calling predict for a single observation and //tallying the observations // int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins; std::vector<double> elapsed_arr; int blocks; std::vector<int> num_blocks; int ct=1; for(auto single_obs : observation){ auto start = std::chrono::steady_clock::now(); if (mmap) blocks = mmapAndPredict(single_obs, preds, ct); else{ blocks = mmapAndPredict(single_obs, preds, ct); } num_blocks.push_back(blocks); results.push_back((double)preds[0] / (double)preds[1] ); auto end = std::chrono::steady_clock::now(); ct+=1; } } inline void serialize() { auto bins = PacsetBaseModel<T, F>::bins; int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins = bins.size(); std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes; std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes; std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start; std::string format = Config::getValue("format"); //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename std::string filename; if(Config::getValue("metadatafilename") == std::string("notfound")) filename = "metadata.txt"; else filename = Config::getValue("metadatafilename"); std::fstream fout; fout.open(filename, std::ios::out ); //Number of classes fout<<num_classes<<"\n"; //Number of bins fout<<num_bins<<"\n"; //Number of trees in each bin for(auto i: bin_sizes){ fout<<i<<"\n"; } //Number of nodes in each bin for(auto i: bin_node_sizes){ fout<<i<<"\n"; } //start position of each bin for(auto bin: bin_start){ for(auto tree_start: bin){ fout<<tree_start<<"\n"; } } fout.close(); if(format == std::string("notfound") || format == std::string("binary")){ std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.bin"; //Write the nodes fout.open(filename, std::ios::binary | std::ios::out ); Node<T, F> node_to_write; for(auto bin: bins){ for(auto node: bin){ node_to_write = node; fout.write((char*)&node_to_write, sizeof(node_to_write)); } } fout.close(); } else{ //Write the nodes std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.txt"; std::cout<<"filename: "<<filename <<"\n"; fout.open(filename, std::ios::out ); for(auto bin: bins){ for(auto node: bin){ fout<<node.getLeft()<<", "<<node.getRight() <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n"; } } fout.close(); } } 
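// Editor's annotation (not in the original header): inside the parallel
// loop of mmapAndPredict() the shared counter block_offset is accumulated
// across iterations, so under OpenMP the block numbers recorded in
// blocks_accessed depend on thread interleaving. A deterministic
// alternative is the per-bin start already computed serially, e.g.
//   block_number = (curr_node[i] + offsets[bin_counter]) / BLOCK_SIZE;
// which equals what block_offset holds in a sequential run and needs no
// shared update.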
inline void deserialize(){ //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename int num_classes, num_bins; std::string filename = Config::getValue("metadatafilename"); //std::string filename = "metadata.txt"; std::fstream f; f.open(filename, std::ios::in ); //Number of classes f>>num_classes; Config::setConfigItem("numclasses", std::to_string(num_classes)); //Number of bins f>>num_bins; Config::setConfigItem("numthreads", std::to_string(num_bins)); std::vector<int> num_trees_bin; std::vector<int> num_nodes_bin; std::vector<std::vector<int>> bin_tree_start; int val; //Number of trees in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_trees_bin.push_back(val); } //Number of nodes in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_nodes_bin.push_back(val); } std::vector<int> temp; //start position of each bin for(int i=0; i<num_bins; ++i){ for(int j=0; j<num_trees_bin[i]; ++j){ f>>val; temp.push_back(val); } bin_tree_start.push_back(temp); temp.clear(); } f.close(); setMembers(num_trees_bin, num_nodes_bin, bin_tree_start); } }; #endif
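/*
 * Editor's sketch (hypothetical, not part of this header): intended usage
 * of PacsetRandomForestRegressor in a translation unit that includes this
 * header, inferred only from the member functions above. The template
 * arguments, config values, file names and feature data are placeholders.
 */
#if 0
int main()
{
    Config::setConfigItem("metadatafilename", "metadata.txt");
    Config::setConfigItem("modelfilename", "packedmodel.bin");

    PacsetRandomForestRegressor<float, int> model;
    model.deserialize();  // restores bin metadata and sets "numthreads"

    std::vector<std::vector<float>> obs = {{0.1f, 2.5f, 1.0f}};
    std::vector<double> preds, results;
    model.predict(obs, preds, results, true /* mmap the packed model */);
    // results[0] holds leaf_sum / num_trees: the forest-averaged regression
    return 0;
}
#endif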
9785.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 128) for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 128) for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 2 ? t4 + 31 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 128) for (t10 = t8; t10 <= (ny - 2 < t8 + 127 ? ny - 2 : t8 + 127); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
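/*
 * Editor's note, not part of the CHILL output: each of the three parallel
 * nests above is a 32x128 rectangular tiling of a plain fdtd-2d update;
 * stripped of the tiling, the first nest is equivalent to
 *
 *   for (i = 1; i <= nx - 1; i++)
 *     for (j = 0; j <= ny - 1; j++)
 *       ey[i][j] = ey[i][j] - 0.5 * (hz[i][j] - hz[i - 1][j]);
 *
 * and the constant 0.69999999999999996 is simply the double closest to 0.7.
 */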