source
stringlengths
3
92
c
stringlengths
26
2.25M
linalg_naive.h
/*
 * Copyright (c) 2018-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace MLCommon {
namespace LinAlg {
namespace Naive {

/**
 * @brief CPU sequential version of the Kronecker product
 *
 * @note All the matrices are in column-major order
 *
 * @tparam DataT Type of the data
 * @param[out] K     Pointer to the result of the Kronecker product A (x) B,
 *                   of dimensions (m*p) x (n*q)
 * @param[in]  A     Matrix A
 * @param[in]  B     Matrix B
 * @param[in]  m     Rows of matrix A
 * @param[in]  n     Columns of matrix A
 * @param[in]  p     Rows of matrix B
 * @param[in]  q     Columns of matrix B
 */
template <typename DataT>
void kronecker(DataT *K, const DataT *A, const DataT *B, int m, int n, int p,
               int q) {
  // Number of rows of the result K
  int k_m = m * p;
#pragma omp parallel for collapse(2)
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < n; j++) {
      DataT a_ij = A[i + m * j];
      for (int v = 0; v < p; v++) {
        for (int w = 0; w < q; w++) {
          DataT b_vw = B[v + p * w];
          // Block (i,j) of K is a_ij * B
          K[i * p + v + (j * q + w) * k_m] = a_ij * b_vw;
        }
      }
    }
  }
}

/**
 * @brief CPU sequential matrix multiplication out = alpha * A*B + beta * out
 *
 * @note All the matrices are in column-major order
 *
 * @tparam DataT Type of the data
 * @param[out]   out   Pointer to the result, of dimensions m x n
 * @param[in]    A     Matrix A
 * @param[in]    B     Matrix B
 * @param[in]    m     Rows of A
 * @param[in]    k     Columns of A / rows of B
 * @param[in]    n     Columns of B
 * @param[in]    alpha Scalar alpha
 * @param[in]    beta  Scalar beta (when non-zero, the previous contents of
 *                     out are read and accumulated into the result)
 */
template <typename DataT>
void matMul(DataT *out, const DataT *A, const DataT *B, int m, int k, int n,
            DataT alpha = 1, DataT beta = 0) {
#pragma omp parallel for collapse(2)
  for (int j = 0; j < n; j++) {
    for (int i = 0; i < m; i++) {
      DataT s = 0.0;
      for (int r = 0; r < k; r++) {
        s += A[i + r * m] * B[r + j * k];
      }
      out[i + j * m] = alpha * s + beta * out[i + j * m];
    }
  }
}

/**
 * @brief CPU sequential vector add (u + alpha * v)
 *
 * @tparam DataT Type of the data
 * @param[out] out   Pointer to the result
 * @param[in]  u     Vector u
 * @param[in]  v     Vector v
 * @param[in]  len   Length of the vectors to add
 * @param[in]  alpha Coefficient to multiply the elements of v with
 */
template <typename DataT>
void add(DataT *out, const DataT *u, const DataT *v, int len,
         DataT alpha = 1.0) {
#pragma omp parallel for
  for (int i = 0; i < len; i++) {
    out[i] = u[i] + alpha * v[i];
  }
}

/**
 * @brief CPU lagged matrix
 *
 * Column (lag-1) of the output, for lag in [1, lags], holds the input
 * shifted by `lag`; the output has (len - lags) rows and `lags` columns,
 * in column-major order.
 *
 * @tparam DataT Type of the data
 * @param[out] out  Pointer to the result
 * @param[in]  in   Pointer to the input vector
 * @param[in]  len  Length of the vector
 * @param[in]  lags Number of lags
 */
template <typename DataT>
void laggedMat(DataT *out, const DataT *in, int len, int lags) {
  int lagged_len = len - lags;
#pragma omp parallel for
  for (int lag = 1; lag <= lags; lag++) {
    DataT *out_ = out + (lag - 1) * lagged_len;
    const DataT *in_ = in + lags - lag;
    for (int i = 0; i < lagged_len; i++) {
      out_[i] = in_[i];
    }
  }
}

/**
 * @brief CPU matrix 2D copy
 *
 * Copies an out_rows x out_cols sub-block of the input matrix, starting at
 * (starting_row, starting_col), into the output matrix.
 *
 * @note Both matrices are in column-major order
 *
 * @tparam DataT Type of the data
 * @param[out] out          Pointer to the result
 * @param[in]  in           Pointer to the input matrix
 * @param[in]  starting_row Starting row
 * @param[in]  starting_col Starting column
 * @param[in]  in_rows      Number of rows in the input matrix
 * @param[in]  out_rows     Number of rows in the output matrix
 * @param[in]  out_cols     Number of columns in the output matrix
 */
template <typename DataT>
void copy2D(DataT *out, const DataT *in, int starting_row, int starting_col,
            int in_rows, int out_rows, int out_cols) {
#pragma omp parallel for collapse(2)
  for (int i = 0; i < out_rows; i++) {
    for (int j = 0; j < out_cols; j++) {
      out[i + j * out_rows] =
        in[starting_row + i + (starting_col + j) * in_rows];
    }
  }
}

/**
 * @brief CPU first difference of a vector
 *
 * Writes len - 1 elements: out[i] = in[i+1] - in[i].
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result
 * @param[in]  in  Pointer to the input vector
 * @param[in]  len Length of the input vector
 */
template <typename DataT>
void diff(DataT *out, const DataT *in, int len) {
#pragma omp parallel for
  for (int i = 0; i < len - 1; i++) {
    out[i] = in[i + 1] - in[i];
  }
}

}  // namespace Naive
}  // namespace LinAlg
}  // namespace MLCommon
dorglq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zunglq.c, normal z -> d, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_unglq
 *
 *  Generates an m-by-n matrix Q with orthonormal rows, which is
 *  defined as the first m rows of a product of the elementary reflectors
 *  returned by plasma_dgelqf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix Q. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix Q. n >= m.
 *
 * @param[in] k
 *          The number of rows of elementary tile reflectors whose product
 *          defines the matrix Q. m >= k >= 0.
 *
 * @param[in] pA
 *          Details of the LQ factorization of the original matrix A as
 *          returned by plasma_dgelqf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_dgelqf.
 *
 * @param[out] pQ
 *          On exit, pointer to the m-by-n matrix Q.
 *
 * @param[in] ldq
 *          The leading dimension of the array Q. ldq >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_dorglq
 * @sa plasma_cunglq
 * @sa plasma_dorglq
 * @sa plasma_sorglq
 * @sa plasma_dgelqf
 *
 ******************************************************************************/
int plasma_dorglq(int m, int n, int k,
                  double *pA, int lda,
                  plasma_desc_t T,
                  double *pQ, int ldq)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (error codes follow LAPACK argument numbering).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (k < 0 || k > m) {
        plasma_error("illegal value of k");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldq < imax(1, m)) {
        plasma_error("illegal value of ldq");
        return -8;
    }

    // quick return
    if (m <= 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t Q;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, k, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, k, n, &Q);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: release the tile descriptors allocated above; the original
        // returned here without destroying A and Q, leaking their storage.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize sequence.
    // FIX: the init return values were previously assigned but never checked.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pQ, ldq, Q, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dorglq(A, T, Q, work, &sequence, &request);

        // Translate Q back to LAPACK layout.
        plasma_omp_ddesc2ge(Q, pQ, ldq, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&Q);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_unglq
 *
 *  Non-blocking tile version of plasma_dorglq().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_dgelqf.
 *
 * @param[out] Q
 *          Descriptor of matrix Q. On exit, matrix Q stored in the tile
 *          layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas
 *          kernels. For multiplication by Q contains preallocated space for
 *          work arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs
 *          to (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dorglq
 * @sa plasma_omp_cunglq
 * @sa plasma_omp_dorglq
 * @sa plasma_omp_sorglq
 * @sa plasma_omp_dgelqf
 *
 ******************************************************************************/
void plasma_omp_dorglq(plasma_desc_t A, plasma_desc_t T, plasma_desc_t Q,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // FIX: validate sequence and request before any plasma_request_fail()
    // call dereferences them; the original checked them last, so a NULL
    // sequence/request would have been dereferenced by the earlier checks.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Q) != PlasmaSuccess) {
        plasma_error("invalid Q");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (Q.m <= 0)
        return;

    // Set Q to identity.
    plasma_pdlaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request);

    // Construct Q.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdorglq_tree(A, T, Q, work, sequence, request);
    }
    else {
        plasma_pdorglq(A, T, Q, work, sequence, request);
    }
}
norm1Many.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <cmath> extern "C" void FUNC(norm1Many)(const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ normA){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; wa2 += fabs(ai); } } normA[0] = wa2; }
cg.c
//-------------------------------------------------------------------------//
//                                                                         //
// This benchmark is an OpenMP C version of the NPB CG code. This OpenMP   //
// C version is developed by the Center for Manycore Programming at Seoul  //
// National University and derived from the OpenMP Fortran versions in     //
// "NPB3.3-OMP" developed by NAS.                                          //
//                                                                         //
// Permission to use, copy, distribute and modify this software for any    //
// purpose with or without fee is hereby granted. This software is         //
// provided "as is" without express or implied warranty.                   //
//                                                                         //
// Information on NPB 3.3, including the technical report, the original    //
// specifications, source code, results and information on how to submit   //
// new results, is available at:                                           //
//                                                                         //
// http://www.nas.nasa.gov/Software/NPB/                                   //
//                                                                         //
// Send comments or suggestions for this OpenMP C version to               //
// cmp@aces.snu.ac.kr                                                      //
//                                                                         //
// Center for Manycore Programming                                         //
// School of Computer Science and Engineering                              //
// Seoul National University                                               //
// Seoul 151-744, Korea                                                    //
//                                                                         //
// E-mail: cmp@aces.snu.ac.kr                                              //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
// and Jaejin Lee                                                          //
//-------------------------------------------------------------------------//

//---------------------------------------------------------------------
// program cg
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
// NOTE(review): my_include.h appears to supply the EasyCrash instrumentation
// used below (EC, clflush, mfence, start_crash/end_crash, flush_whole_cache)
// — confirm against that header.
#include "../my_include/my_include.h"

//---------------------------------------------------------------------
// Fortran common blocks of the original NPB code mapped to file-scope
// statics.
/* common / main_int_mem / */
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NZ+1+NA];
static int arow[NA+1];
static int acol[NAZ];

/* common / main_flt_mem / */
static double v[NZ];
static double aelt[NAZ];
static double a[NZ];       // nonzero values of the sparse matrix
static double x[NA+2];     // current eigenvector estimate
static double z[NA+2];     // CG solution vector
static double p[NA+2];     // CG search direction
static double q[NA+2];     // q = A.p
static double r[NA+2];     // CG residual

/* common /tinof/ */
// Per-thread bookkeeping: each OpenMP thread gets its own copy.
static int myid, num_threads, ilow, ihigh;
#pragma omp threadprivate(myid, num_threads, ilow, ihigh)

#define max_threads 1024
static int last_n[max_threads+1];

/* common / partit_size / */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;

/* common /urando/ */
// Random-number generator state, private to each thread.
static double amult;
static double tran;
double tflush=0;           // accumulated cache-flush overhead (seconds)
#pragma omp threadprivate (amult,tran)

/* common /timers/ */
static logical timeron;

//---------------------------------------------------------------------
// Forward declarations (sparse, sprnvc, icnvrt, vecset and makea are
// defined later in this file).
static void conj_grad(int colidx[], int rowstr[], double x[], double z[],
                      double a[], double p[], double q[], double r[],
                      double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
                  int firstrow, int lastrow, int firstcol, int lastcol,
                  int arow[], int acol[][NONZER+1], double aelt[][NONZER+1],
                  double v[], int iv[]);
static void sparse(double a[], int colidx[], int rowstr[], int n, int nz,
                   int nozer, int arow[], int acol[][NONZER+1],
                   double aelt[][NONZER+1], int firstrow, int lastrow,
                   int last_n[], double v[], int iv[], int nzloc[],
                   double rcond, double shift);
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);

//---------------------------------------------------------------------
// CG driver: builds the sparse test matrix, then runs NITER inverse
// power-method iterations, each of which solves A.z = x with conj_grad and
// updates the eigenvalue estimate zeta = SHIFT + 1/(x.z).
int main(int argc, char *argv[])
{
  int i, j, k, it;

  /*
  //EC: candidates
  crucial_data(x,"double", NA+2); //critical
  crucial_data(z,"double", NA+2); //critical
  crucial_data(p,"double", NA+2);
  crucial_data(q,"double", NA+2);
  crucial_data(r,"double", NA+2);
  consistent_data(&it,"int",1);
  */

  double zeta;
  double rnorm;
  double norm_temp1, norm_temp2;
  double t, mflops, tmax;
  char Class;
  logical verified;
  double zeta_verify_value, epsilon, err;

  char *t_names[T_last];

  for (i = 0; i < T_last; i++) {
    timer_clear(i);
  }

  // Detailed section timing is enabled by the presence of a timer.flag file.
  FILE *fp;
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_conj_grad] = "conjgd";
    fclose(fp);
  } else {
    timeron = false;
  }

  timer_start(T_init);

  firstrow = 0;
  lastrow = NA-1;
  firstcol = 0;
  lastcol = NA-1;

  // Map the compile-time problem parameters to an NPB class and its
  // precomputed reference value of zeta.
  if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10) {
    Class = 'S';
    zeta_verify_value = 8.5971775078648;
  } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12) {
    Class = 'W';
    zeta_verify_value = 10.362595087124;
  } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20) {
    Class = 'A';
    zeta_verify_value = 17.130235054029;
  } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60) {
    Class = 'B';
    zeta_verify_value = 22.712745482631;
  } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110) {
    Class = 'C';
    zeta_verify_value = 28.973605592845;
  } else if (NA == 1500000 && NONZER == 21 && NITER == 100 && SHIFT == 500) {
    Class = 'D';
    zeta_verify_value = 52.514532105794;
  } else if (NA == 9000000 && NONZER == 26 && NITER == 100 && SHIFT == 1500) {
    Class = 'E';
    zeta_verify_value = 77.522164599383;
  } else {
    Class = 'U';
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - CG Benchmark\n\n");
  printf(" Size: %11d\n", NA);
  printf(" Iterations: %5d\n", NITER);
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
  printf("\n");

  naa = NA;
  nzz = NZ;

  //---------------------------------------------------------------------
  // Initialize random number generator
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(i,j,k,zeta)
  {
  tran = 314159265.0;
  amult = 1220703125.0;
  zeta = randlc(&tran, amult);

  //---------------------------------------------------------------------
  // Generate the sparse test matrix.
  //---------------------------------------------------------------------
  makea(naa, nzz, a, colidx, rowstr,
        firstrow, lastrow, firstcol, lastcol,
        arow,
        (int (*)[NONZER+1])(void*)acol,
        (double (*)[NONZER+1])(void*)aelt,
        v, iv);
  #pragma omp barrier

  //---------------------------------------------------------------------
  // Note: as a result of the above call to makea:
  // values of j used in indexing rowstr go from 0 --> lastrow-firstrow
  // values of colidx which are col indexes go from firstcol --> lastcol
  // So:
  // Shift the col index vals from actual (firstcol --> lastcol )
  // to local, i.e., (0 --> lastcol-firstcol)
  //---------------------------------------------------------------------
  #pragma omp for
  for (j = 0; j < lastrow - firstrow + 1; j++) {
    for (k = rowstr[j]; k < rowstr[j+1]; k++) {
      colidx[k] = colidx[k] - firstcol;
    }
  }

  //---------------------------------------------------------------------
  // set starting vector to (1, 1, .... 1)
  //---------------------------------------------------------------------
  #pragma omp for
  for (i = 0; i < NA+1; i++) {
    x[i] = 1.0;
  }
  #pragma omp for
  for (j = 0; j < lastcol - firstcol + 1; j++) {
    q[j] = 0.0;
    z[j] = 0.0;
    r[j] = 0.0;
    p[j] = 0.0;
  }
  }
  zeta = 0.0;

  //---------------------------------------------------------------------
  //---->
  // Do one iteration untimed to init all code and data page tables
  //----> (then reinit, start timing, to niter its)
  //---------------------------------------------------------------------
  for (it = 1; it <= 1; it++) {
    //---------------------------------------------------------------------
    // The call to the conjugate gradient routine:
    //---------------------------------------------------------------------
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);

    //---------------------------------------------------------------------
    // zeta = shift + 1/(x.z)
    // So, first: (x.z)
    // Also, find norm of z
    // So, first: (z.z)
    //---------------------------------------------------------------------
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) \
      reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j] * z[j];
      norm_temp2 = norm_temp2 + z[j] * z[j];
    }
    norm_temp2 = 1.0 / sqrt(norm_temp2);

    //---------------------------------------------------------------------
    // Normalize z to obtain x
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
  } // end of do one iteration untimed

  //---------------------------------------------------------------------
  // set starting vector to (1, 1, .... 1)
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(i)
  for (i = 0; i < NA+1; i++) {
    x[i] = 1.0;
  }
  zeta = 0.0;

  timer_stop(T_init);

  printf(" Initialization time = %15.3f seconds\n", timer_read(T_init));

  timer_start(T_bench);

  //---------------------------------------------------------------------
  //---->
  // Main Iteration for inverse power method
  //---->
  //---------------------------------------------------------------------
  // NOTE(review): flush_whole_cache/start_crash/end_crash bracket the timed
  // region — presumably EasyCrash fault-injection control; confirm.
  tflush = 0;
  flush_whole_cache();
  start_crash();
  for (it = 1; it <= NITER; it++) {
    //---------------------------------------------------------------------
    // The call to the conjugate gradient routine:
    //---------------------------------------------------------------------
    if (timeron) timer_start(T_conj_grad);
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
    if (timeron) timer_stop(T_conj_grad);

    //---------------------------------------------------------------------
    // zeta = shift + 1/(x.z)
    // So, first: (x.z)
    // Also, find norm of z
    // So, first: (z.z)
    //---------------------------------------------------------------------
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) \
      reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j]*z[j];
      norm_temp2 = norm_temp2 + z[j]*z[j];
    }
    norm_temp2 = 1.0 / sqrt(norm_temp2);
    zeta = SHIFT + 1.0 / norm_temp1;

    if (it == 1)
      printf("\n iteration ||r|| zeta\n");
    printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta);

    //---------------------------------------------------------------------
    // Normalize z to obtain x
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }

    //EasyCrash: flush critical data objects
    //timer_clear(T_flush);
    //timer_start(T_flush);
    EC(&p, sizeof(p));
    EC(&q, sizeof(q));
    clflush(&it);
    mfence();
    //timer_stop(T_flush);
    //tflush += timer_read(T_flush);

    /*
    //checkpoint
    checkpoint(&p, sizeof(p));
    checkpoint(&q, sizeof(q));
    checkpoint(&r, sizeof(r));
    checkpoint(&z, sizeof(z));
    checkpoint(&x, sizeof(x));
    clflush(&it);
    mfence();
    */
  } // end of main iter inv pow meth
  end_crash();

  timer_stop(T_bench);

  //---------------------------------------------------------------------
  // End of timed section
  //---------------------------------------------------------------------
  t = timer_read(T_bench);

  printf(" Benchmark completed\n");

  epsilon = 1.0e-10;
  if (Class != 'U') {
    err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" Zeta is %20.13E\n", zeta);
      printf(" Error is %20.13E\n", err);
      // NOTE(review): num_threads is threadprivate and is read here on the
      // master thread outside any parallel region — verify it is actually
      // assigned on this thread.
      printf("Thread rank: %d\n", num_threads);
      printf(" Cache flushing overhead is %lf\n", tflush/num_threads );
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" Zeta %20.13E\n", zeta);
      printf(" The correct zeta is %20.13E\n", zeta_verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
  }

  if (t != 0.0) {
    mflops = (double)(2*NITER*NA)
           * (3.0+(double)(NONZER*(NONZER+1))
             + 25.0*(5.0+(double)(NONZER*(NONZER+1)))
             + 3.0) / t / 1000000.0;
  } else {
    mflops = 0.0;
  }

  print_results("CG", Class, NA, 0, 0,
                NITER, t,
                mflops, " floating point",
                verified, NPBVERSION, COMPILETIME,
                CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;
    printf(" SECTION Time (secs)\n");
    for (i = 0; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_init) {
        printf(" %8s:%9.3f\n", t_names[i], t);
      } else {
        printf(" %8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100.0/tmax);
        if (i == T_conj_grad) {
          t = tmax - t;
          printf(" --> %8s:%9.3f (%6.2f%%)\n", "rest", t, t*100.0/tmax);
        }
      }
    }
  }

  return 0;
}

//---------------------------------------------------------------------
// Floating point arrays here are named as in NPB1 spec discussion of
// CG algorithm
//
// Runs cgitmax (25) iterations of conjugate gradient on A.z = x, then
// returns in *rnorm the explicitly computed residual ||x - A.z||.
//---------------------------------------------------------------------
static void conj_grad(int colidx[], int rowstr[], double x[], double z[],
                      double a[], double p[], double q[], double r[],
                      double *rnorm)
{
  int j, k;
  int cgit, cgitmax = 25;
  double d, sum, rho, rho0, alpha, beta, suml;

  rho = 0.0;
  sum = 0.0;

  #pragma omp parallel default(shared) private(j,k,cgit,suml,alpha,beta) \
    shared(d,rho0,rho,sum)
  {
  //---------------------------------------------------------------------
  // Initialize the CG algorithm:
  //---------------------------------------------------------------------
  #pragma omp for
  for (j = 0; j < naa+1; j++) {
    q[j] = 0.0;
    z[j] = 0.0;
    r[j] = x[j];
    p[j] = r[j];
  }

  //---------------------------------------------------------------------
  // rho = r.r
  // Now, obtain the norm of r: First, sum squares of r elements locally...
  //---------------------------------------------------------------------
  #pragma omp for reduction(+:rho)
  for (j = 0; j < lastcol - firstcol + 1; j++) {
    rho = rho + r[j]*r[j];
  }

  //---------------------------------------------------------------------
  //---->
  // The conj grad iteration loop
  //---->
  //---------------------------------------------------------------------
  for (cgit = 1; cgit <= cgitmax; cgit++) {
    #pragma omp master
    {
      //---------------------------------------------------------------------
      // Save a temporary of rho and initialize reduction variables
      //---------------------------------------------------------------------
      rho0 = rho;
      d = 0.0;
      rho = 0.0;
    }
    // All threads must see rho0/d/rho reset before the reductions below.
    #pragma omp barrier

    //---------------------------------------------------------------------
    // q = A.p
    // The partition submatrix-vector multiply: use workspace w
    //---------------------------------------------------------------------
    //
    // NOTE: this version of the multiply is actually (slightly: maybe %5)
    // faster on the sp2 on 16 nodes than is the unrolled-by-2 version
    // below. On the Cray t3d, the reverse is true, i.e., the
    // unrolled-by-two version is some 10% faster.
    // The unrolled-by-8 version below is significantly faster
    // on the Cray t3d - overall speed of code is 1.5 times faster.
    #pragma omp for
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      suml = 0.0;
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        suml = suml + a[k]*p[colidx[k]];
      }
      q[j] = suml;
    }

    /*
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      int i = rowstr[j];
      int iresidue = (rowstr[j+1] - i) % 2;
      double sum1 = 0.0;
      double sum2 = 0.0;
      if (iresidue == 1)
        sum1 = sum1 + a[i]*p[colidx[i]];
      for (k = i + iresidue; k <= rowstr[j+1] - 2; k += 2) {
        sum1 = sum1 + a[k] *p[colidx[k]];
        sum2 = sum2 + a[k+1]*p[colidx[k+1]];
      }
      q[j] = sum1 + sum2;
    }
    */

    /*
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      int i = rowstr[j];
      int iresidue = (rowstr[j+1] - i) % 8;
      suml = 0.0;
      for (k = i; k <= i + iresidue - 1; k++) {
        suml = suml + a[k]*p[colidx[k]];
      }
      for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
        suml = suml + a[k ]*p[colidx[k ]]
                    + a[k+1]*p[colidx[k+1]]
                    + a[k+2]*p[colidx[k+2]]
                    + a[k+3]*p[colidx[k+3]]
                    + a[k+4]*p[colidx[k+4]]
                    + a[k+5]*p[colidx[k+5]]
                    + a[k+6]*p[colidx[k+6]]
                    + a[k+7]*p[colidx[k+7]];
      }
      q[j] = suml;
    }
    */

    //---------------------------------------------------------------------
    // Obtain p.q
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:d)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      d = d + p[j]*q[j];
    }

    //---------------------------------------------------------------------
    // Obtain alpha = rho / (p.q)
    //---------------------------------------------------------------------
    alpha = rho0 / d;

    //---------------------------------------------------------------------
    // Obtain z = z + alpha*p
    // and r = r - alpha*q
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:rho)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      z[j] = z[j] + alpha*p[j];
      r[j] = r[j] - alpha*q[j];

      //---------------------------------------------------------------------
      // rho = r.r
      // Now, obtain the norm of r: First, sum squares of r elements locally..
      //---------------------------------------------------------------------
      rho = rho + r[j]*r[j];
    }

    //---------------------------------------------------------------------
    // Obtain beta:
    //---------------------------------------------------------------------
    beta = rho / rho0;

    //---------------------------------------------------------------------
    // p = r + beta*p
    //---------------------------------------------------------------------
    #pragma omp for
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      p[j] = r[j] + beta*p[j];
    }

    /*
    //EasyCrash: flush critical data objects
    //timer_clear(T_flush);
    //timer_start(T_flush);
    flush_dcache_range(&p[0], &p[NA]+sizeof(double));
    flush_dcache_range(&q[0], &q[NA]+sizeof(double));
    //flush_dcache_range(&z[0], &z[NA]+sizeof(double));
    //flush_dcache_range(&r[0], &r[NA]+sizeof(double));
    //flush_dcache_range(&x[0], &x[NA]+sizeof(double));
    clflush(&cgit);
    mfence();
    //timer_stop(T_flush);
    // tflush += timer_read(T_flush);
    */
  } // end of do cgit=1,cgitmax

  //---------------------------------------------------------------------
  // Compute residual norm explicitly: ||r|| = ||x - A.z||
  // First, form A.z
  // The partition submatrix-vector multiply
  //---------------------------------------------------------------------
  #pragma omp for
  for (j = 0; j < lastrow - firstrow + 1; j++) {
    suml = 0.0;
    for (k = rowstr[j]; k < rowstr[j+1]; k++) {
      suml = suml + a[k]*z[colidx[k]];
    }
    r[j] = suml;
  }

  //---------------------------------------------------------------------
  // At this point, r contains A.z
  //---------------------------------------------------------------------
  #pragma omp for reduction(+:sum) nowait
  for (j = 0; j < lastcol-firstcol+1; j++) {
    suml = x[j] - r[j];
    sum = sum + suml*suml;
  }
  }

  *rnorm = sqrt(sum);
}

//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i
number of cols/rows of matrix
// nz          i    nonzeros as declared array size
// rcond       r*8  condition number
// shift       r*8  main diagonal shift
//
// output
//
// a           r*8  array for nonzeros
// colidx      i    col indices
// rowstr      i    row pointers
//
// workspace
//
// iv, arow, acol i
// aelt        r*8
//---------------------------------------------------------------------
// NOTE(review): this routine must run inside an OpenMP parallel region —
// it calls omp_get_num_threads()/omp_get_thread_num() and hits
// "#pragma omp barrier". The variables num_threads, myid, ilow, ihigh,
// last_n, tran, amult are file-scope (declared outside this chunk);
// presumably threadprivate or per-thread indexed — TODO confirm.
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
                  int firstrow, int lastrow, int firstcol, int lastcol,
                  int arow[], int acol[][NONZER+1], double aelt[][NONZER+1],
                  double v[], int iv[])
{
  int iouter, ivelt, nzv, nn1;
  int ivc[NONZER+1];
  double vc[NONZER+1];

  //---------------------------------------------------------------------
  // nonzer is approximately (int(sqrt(nnza /n)));
  //---------------------------------------------------------------------
  int work;

  //---------------------------------------------------------------------
  // nn1 is the smallest power of two not less than n
  //---------------------------------------------------------------------
  nn1 = 1;
  do {
    nn1 = 2 * nn1;
  } while (nn1 < n);

  //---------------------------------------------------------------------
  // Generate nonzero positions and save for the use in sparse.
  //---------------------------------------------------------------------
  // Block-partition the n rows across threads; each thread owns
  // rows [ilow, ihigh).
  num_threads = omp_get_num_threads();
  myid = omp_get_thread_num();
  if (num_threads > max_threads) {
    if (myid == 0) {
      printf(" Warning: num_threads%6d exceeded an internal limit%6d\n",
             num_threads, max_threads);
    }
    num_threads = max_threads;
  }
  work  = (n + num_threads - 1)/num_threads;
  ilow  = work * myid;
  ihigh = ilow + work;
  if (ihigh > n) ihigh = n;

  // sprnvc is called for EVERY row up to ihigh, but the result is kept
  // only for rows in [ilow, ihigh) — presumably so the random-number
  // stream stays consistent regardless of the partition; TODO confirm.
  for (iouter = 0; iouter < ihigh; iouter++) {
    nzv = NONZER;
    sprnvc(n, nzv, nn1, vc, ivc);
    if (iouter >= ilow) {
      vecset(n, vc, ivc, &nzv, iouter+1, 0.5);
      arow[iouter] = nzv;
      for (ivelt = 0; ivelt < nzv; ivelt++) {
        acol[iouter][ivelt] = ivc[ivelt] - 1;
        aelt[iouter][ivelt] = vc[ivelt];
      }
    }
  }
  #pragma omp barrier

  //---------------------------------------------------------------------
  // ... make the sparse matrix from list of elements with duplicates
  // (v and iv are used as workspace)
  //---------------------------------------------------------------------
  sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol, aelt,
         firstrow, lastrow, last_n, v, &iv[0], &iv[nz], RCOND, SHIFT);
}

//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
// Assembles CSR arrays (a, colidx, rowstr) from the per-row triple lists
// (arow, acol, aelt), summing duplicate entries and adding rcond - shift
// on the diagonal. Runs SPMD: each thread fills its own [ilow, ihigh)
// row slice, with barriers separating the prefix-sum phases.
// NOTE(review): the parameter 'nozer' is never used in the body.
static void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer,
                   int arow[], int acol[][NONZER+1], double aelt[][NONZER+1],
                   int firstrow, int lastrow, int last_n[],
                   double v[], int iv[], int nzloc[], double rcond, double shift)
{
  int nrows;

  //---------------------------------------------------
  // generate a sparse matrix from a list of
  // [col, row, element] tri
  //---------------------------------------------------
  int i, j, j1, j2, nza, k, kk, nzrow, jcol;
  double size, scale, ratio, va;
  logical cont40;

  //---------------------------------------------------------------------
  // how many rows of result
  //---------------------------------------------------------------------
  nrows = lastrow - firstrow + 1;
  j1 = ilow + 1;
  j2 = ihigh + 1;

  //---------------------------------------------------------------------
  // ...count the number of triples in each row
  //---------------------------------------------------------------------
  for (j = j1; j < j2; j++) {
    rowstr[j] = 0;
  }
  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza];
      if (j >= ilow && j < ihigh) {
        j = j + 1;
        rowstr[j] = rowstr[j] + arow[i];
      }
    }
  }

  // Local (per-slice) exclusive prefix sum of row counts.
  if (myid == 0) {
    rowstr[0] = 0;
    j1 = 0;
  }
  for (j = j1+1; j < j2; j++) {
    rowstr[j] = rowstr[j] + rowstr[j-1];
  }
  // Publish each slice's total so later slices can offset themselves.
  if (myid < num_threads) last_n[myid] = rowstr[j2-1];

  #pragma omp barrier

  // Add the totals of all preceding slices to turn the local prefix
  // sums into global row pointers.
  nzrow = 0;
  if (myid < num_threads) {
    for (i = 0; i < myid; i++) {
      nzrow = nzrow + last_n[i];
    }
  }
  if (nzrow > 0) {
    for (j = j1; j < j2; j++) {
      rowstr[j] = rowstr[j] + nzrow;
    }
  }
  #pragma omp barrier

  nza = rowstr[nrows] - 1;

  //---------------------------------------------------------------------
  // ... rowstr(j) now is the location of the first nonzero
  //     of row j of a
  //---------------------------------------------------------------------
  if (nza > nz) {
    #pragma omp master
    {
      printf("Space for matrix elements exceeded in sparse\n");
      printf("nza, nzmax = %d, %d\n", nza, nz);
    }
    // NOTE(review): exit() is executed by every thread, not just master.
    exit(EXIT_FAILURE);
  }

  //---------------------------------------------------------------------
  // ... preload data pages
  //---------------------------------------------------------------------
  for (j = ilow; j < ihigh; j++) {
    for (k = rowstr[j]; k < rowstr[j+1]; k++) {
      v[k] = 0.0;
      iv[k] = -1;   // -1 marks an empty slot in the row
    }
    nzloc[j] = 0;   // per-row count of duplicate entries found
  }

  //---------------------------------------------------------------------
  // ... generate actual values by summing duplicates
  //---------------------------------------------------------------------
  size = 1.0;
  ratio = pow(rcond, (1.0 / (double)(n)));

  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza];
      if (j < ilow || j >= ihigh) continue;   // not our slice

      scale = size * aelt[i][nza];
      for (nzrow = 0; nzrow < arow[i]; nzrow++) {
        jcol = acol[i][nzrow];
        va = aelt[i][nzrow] * scale;

        //--------------------------------------------------------------------
        // ... add the identity * rcond to the generated matrix to bound
        //     the smallest eigenvalue from below by rcond
        //--------------------------------------------------------------------
        if (jcol == j && j == i) {
          va = va + rcond - shift;
        }

        // Insert (jcol, va) into row j keeping iv[] sorted by column.
        // cont40 mirrors the Fortran "goto 40" continue flag.
        cont40 = false;
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
          if (iv[k] > jcol) {
            //----------------------------------------------------------------
            // ... insert colidx here orderly
            //----------------------------------------------------------------
            for (kk = rowstr[j+1]-2; kk >= k; kk--) {
              if (iv[kk] > -1) {
                v[kk+1]  = v[kk];
                iv[kk+1] = iv[kk];
              }
            }
            iv[k] = jcol;
            v[k]  = 0.0;
            cont40 = true;
            break;
          } else if (iv[k] == -1) {
            iv[k] = jcol;
            cont40 = true;
            break;
          } else if (iv[k] == jcol) {
            //--------------------------------------------------------------
            // ... mark the duplicated entry
            //--------------------------------------------------------------
            nzloc[j] = nzloc[j] + 1;
            cont40 = true;
            break;
          }
        }
        if (cont40 == false) {
          printf("internal error in sparse: i=%d\n", i);
          exit(EXIT_FAILURE);
        }
        // k still indexes the slot chosen by the break above.
        v[k] = v[k] + va;
      }
    }
    size = size * ratio;
  }
  #pragma omp barrier

  //---------------------------------------------------------------------
  // ... remove empty entries and generate final results
  //---------------------------------------------------------------------
  // Same two-phase prefix-sum pattern as above, now over the duplicate
  // counts (nzloc) so rows can be compacted in place.
  for (j = ilow+1; j < ihigh; j++) {
    nzloc[j] = nzloc[j] + nzloc[j-1];
  }
  if (myid < num_threads) last_n[myid] = nzloc[ihigh-1];

  #pragma omp barrier

  nzrow = 0;
  if (myid < num_threads) {
    for (i = 0; i < myid; i++) {
      nzrow = nzrow + last_n[i];
    }
  }
  if (nzrow > 0) {
    for (j = ilow; j < ihigh; j++) {
      nzloc[j] = nzloc[j] + nzrow;
    }
  }
  #pragma omp barrier

  // Compact: copy each row's kept entries left by the cumulative
  // duplicate count.
  #pragma omp for
  for (j = 0; j < nrows; j++) {
    if (j > 0) {
      j1 = rowstr[j] - nzloc[j-1];
    } else {
      j1 = 0;
    }
    j2 = rowstr[j+1] - nzloc[j];
    nza = rowstr[j];
    for (k = j1; k < j2; k++) {
      a[k] = v[nza];
      colidx[k] = iv[nza];
      nza = nza + 1;
    }
  }
  // Shift the row pointers to match the compacted layout.
  #pragma omp for
  for (j = 1; j < nrows+1; j++) {
    rowstr[j] = rowstr[j] - nzloc[j-1];
  }
  nza = rowstr[nrows] - 1;
}

//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
//---------------------------------------------------------------------
// NOTE(review): the comment above describes a 'mark' array that this C
// translation no longer uses; duplicates are detected by a linear scan
// of iv[] instead.
static void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
  int nzv, ii, i;
  double vecelt, vecloc;

  nzv = 0;
  while (nzv < nz) {
    vecelt = randlc(&tran, amult);

    //---------------------------------------------------------------------
    // generate an integer between 1 and n in a portable manner
    //---------------------------------------------------------------------
    vecloc = randlc(&tran, amult);
    i = icnvrt(vecloc, nn1) + 1;
    if (i > n) continue;   // nn1 >= n, so some draws fall outside [1, n]

    //---------------------------------------------------------------------
    // was this integer generated already?
    //---------------------------------------------------------------------
    logical was_gen = false;
    for (ii = 0; ii < nzv; ii++) {
      if (iv[ii] == i) {
        was_gen = true;
        break;
      }
    }
    if (was_gen) continue;
    v[nzv] = vecelt;
    iv[nzv] = i;   // 1-based position
    nzv = nzv + 1;
  }
}

//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
static int icnvrt(double x, int ipwr2)
{
  return (int)(ipwr2 * x);
}

//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
// Overwrites the entry at position i if present; otherwise appends it
// and increments *nzv.
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
  int k;
  logical set;

  set = false;
  for (k = 0; k < *nzv; k++) {
    if (iv[k] == i) {
      v[k] = val;
      set = true;
    }
  }
  if (set == false) {
    v[*nzv]  = val;
    iv[*nzv] = i;
    *nzv = *nzv + 1;
  }
}
ddm_matrix_assembly.c
//
// Created by bergolho on 25/07/19.
//
// Finite-volume matrix assembly for the monodomain equation using the
// DDM (discrete diffusion/heterogeneous conductivity) formulation.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#include "../alg/grid/grid.h"
#include "../config/assembly_matrix_config.h"
#include "../monodomain/constants.h"
#include "../utils/utils.h"
#include "../single_file_libraries/stb_ds.h"
#include "../config_helpers/config_helpers.h"

// Sets every active cell's transmembrane voltage to the configured
// initial value and seeds the RHS vector b with initial_v * alpha.
// the_grid / the_solver / initial_v come from the INIT_ASSEMBLY_MATRIX
// macro's hidden parameter list (declared elsewhere — TODO confirm).
// NOTE(review): 'i' is int while 'active_cells' is uint32_t — signed/
// unsigned comparison in the loop condition.
INIT_ASSEMBLY_MATRIX(set_initial_conditions_fvm)
{
  real_cpu alpha;
  struct cell_node **ac = the_grid->active_cells;
  uint32_t active_cells = the_grid->num_active_cells;
  real_cpu beta = the_solver->beta;
  real_cpu cm = the_solver->cm;
  real_cpu dt = the_solver->dt;
  int i;

  #pragma omp parallel for private(alpha)
  for(i = 0; i < active_cells; i++) {
    alpha = ALPHA(beta, cm, dt, ac[i]->discretization.x,
                  ac[i]->discretization.y, ac[i]->discretization.z);
    ac[i]->v = initial_v;
    ac[i]->b = initial_v * alpha;
  }
}

// Forward declaration.
// NOTE(review): declared 'static' here but the definition below omits
// the keyword — legal C (internal linkage prevails), but inconsistent.
static struct element fill_element_ddm (uint32_t position, char direction,
                                        real_cpu dx, real_cpu dy, real_cpu dz,
                                        const real_cpu sigma_x, const real_cpu sigma_y, const real_cpu sigma_z,
                                        const real_cpu kappa_x, const real_cpu kappa_y, const real_cpu kappa_z,
                                        const real_cpu dt,
                                        struct element *cell_elements);

// If the cell's center lies inside the axis-aligned rectangle
// [x_left, x_right] x [y_down, y_up], scale its conductivity tensor
// by sigma_factor (used to carve low-conductivity blocks).
void create_sigma_low_block(struct cell_node* ac,
                            real_cpu x_left, real_cpu x_right,
                            real_cpu y_down, real_cpu y_up,
                            double b_sigma_x, double b_sigma_y, double b_sigma_z,
                            double sigma_factor)
{
  real_cpu x = ac->center.x;
  real_cpu y = ac->center.y;

  if( (x<=x_right) && (x>= x_left) && (y>= y_down) && (y<= y_up)) {
    ac->sigma.x = b_sigma_x*sigma_factor;
    ac->sigma.y = b_sigma_y*sigma_factor;
    ac->sigma.z = b_sigma_z*sigma_factor;
  }
}

// Computes the per-cell DDM kappa coefficients from the physical cell
// lengths and each cell's local discretization (KAPPA macro declared
// elsewhere).
void calculate_kappa_elements(struct monodomain_solver *the_solver, struct grid *the_grid,
                              const real_cpu cell_length_x, const real_cpu cell_length_y,
                              const real_cpu cell_length_z)
{
  uint32_t num_active_cells = the_grid->num_active_cells;
  struct cell_node **ac = the_grid->active_cells;
  real_cpu beta = the_solver->beta;
  real_cpu cm = the_solver->cm;

  #pragma omp parallel for
  for (uint32_t i = 0; i < num_active_cells; i++) {
    ac[i]->kappa.x = KAPPA(beta,cm,cell_length_x,ac[i]->discretization.x);
    ac[i]->kappa.y = KAPPA(beta,cm,cell_length_y,ac[i]->discretization.y);
    ac[i]->kappa.z = KAPPA(beta,cm,cell_length_z,ac[i]->discretization.z);
  }
}

// Builds one off-diagonal matrix element for the face in 'direction'
// ('n'/'s' = z, 'e'/'w' = y, 'f'/'b' = x) and accumulates the matching
// contribution onto the diagonal (cell_elements[0]).
// NOTE(review): opposite directions use identical formulas, so the six
// branches collapse pairwise; also, if 'direction' is not one of the six
// known chars, new_element.value is returned uninitialized.
struct element fill_element_ddm (uint32_t position, char direction,
                                 real_cpu dx, real_cpu dy, real_cpu dz,
                                 const real_cpu sigma_x, const real_cpu sigma_y, const real_cpu sigma_z,
                                 const real_cpu kappa_x, const real_cpu kappa_y, const real_cpu kappa_z,
                                 const real_cpu dt,
                                 struct element *cell_elements)
{
  real_cpu multiplier;

  struct element new_element;
  new_element.column = position;
  new_element.direction = direction;

  // Z direction
  if(direction == 'n') {
    multiplier = ((dx * dy) / dz);
    new_element.value = ( multiplier * (-sigma_z - (kappa_z / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_z + (kappa_z / dt)) );
  }
  // Z direction
  else if(direction == 's') {
    multiplier = ((dx * dy) / dz);
    new_element.value = ( multiplier * (-sigma_z - (kappa_z / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_z + (kappa_z / dt)) );
  }
  // Y direction
  else if(direction == 'e') {
    multiplier = ((dx * dz) / dy);
    new_element.value = ( multiplier * (-sigma_y - (kappa_y / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_y + (kappa_y / dt)) );
  }
  // Y direction
  else if(direction == 'w') {
    multiplier = ((dx * dz) / dy);
    new_element.value = ( multiplier * (-sigma_y - (kappa_y / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_y + (kappa_y / dt)) );
  }
  // X direction
  else if(direction == 'f') {
    multiplier = ((dy * dz) / dx);
    new_element.value = ( multiplier * (-sigma_x - (kappa_x / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_x + (kappa_x / dt)) );
  }
  // X direction
  else if(direction == 'b') {
    multiplier = ((dy * dz) / dx);
    new_element.value = ( multiplier * (-sigma_x - (kappa_x / dt)) );
    cell_elements[0].value += ( multiplier * (sigma_x + (kappa_x / dt)) );
  }
  return new_element;
}

// (definition continues below)
static void
fill_discretization_matrix_elements_ddm (struct cell_node *grid_cell, void *neighbour_grid_cell,
                                         real_cpu dt, char direction)
{
  // Resolves the real cell-node neighbour of grid_cell in 'direction'
  // (walking through transition nodes of the refined mesh if needed) and
  // inserts the symmetric pair of flux entries into both cells' element
  // lists, using the harmonic mean of the two conductivities per axis.
  uint32_t position;
  bool has_found;
  real_cpu dx, dy, dz;

  struct transition_node *white_neighbor_cell;
  struct cell_node *black_neighbor_cell;

  /* When neighbour_grid_cell is a transition node, looks for the next neighbor
   * cell which is a cell node. */
  uint16_t neighbour_grid_cell_level = ((struct basic_cell_data *)(neighbour_grid_cell))->level;
  char neighbour_grid_cell_type = ((struct basic_cell_data *)(neighbour_grid_cell))->type;

  if(neighbour_grid_cell_level > grid_cell->cell_data.level) {
    // Neighbour is more refined: descend through quadruple_connector1.
    if(neighbour_grid_cell_type == TRANSITION_NODE_TYPE) {
      has_found = false;
      while(!has_found) {
        if(neighbour_grid_cell_type == TRANSITION_NODE_TYPE) {
          white_neighbor_cell = (struct transition_node *)neighbour_grid_cell;
          if(white_neighbor_cell->single_connector == NULL) {
            has_found = true;   // hit the boundary — no real neighbour
          } else {
            neighbour_grid_cell = white_neighbor_cell->quadruple_connector1;
            neighbour_grid_cell_type = ((struct basic_cell_data *)(neighbour_grid_cell))->type;
          }
        } else {
          break;
        }
      }
    }
  } else {
    // Neighbour at same or coarser level: follow single_connector.
    if(neighbour_grid_cell_level <= grid_cell->cell_data.level && (neighbour_grid_cell_type == TRANSITION_NODE_TYPE)) {
      has_found = false;
      while(!has_found) {
        if(neighbour_grid_cell_type == TRANSITION_NODE_TYPE) {
          white_neighbor_cell = (struct transition_node *)(neighbour_grid_cell);
          if(white_neighbor_cell->single_connector == 0) {
            has_found = true;
          } else {
            neighbour_grid_cell = white_neighbor_cell->single_connector;
            neighbour_grid_cell_type = ((struct basic_cell_data *)(neighbour_grid_cell))->type;
          }
        } else {
          break;
        }
      }
    }
  }

  // We care only with the interior points
  if(neighbour_grid_cell_type == CELL_NODE_TYPE) {
    black_neighbor_cell = (struct cell_node *)(neighbour_grid_cell);

    if(black_neighbor_cell->active) {

      // Harmonic mean of the two cells' conductivities, per axis;
      // zero if either side is zero (no flux across the face).
      real_cpu sigma_x1 = grid_cell->sigma.x;
      real_cpu sigma_x2 = black_neighbor_cell->sigma.x;
      real_cpu sigma_x = 0.0;
      if(sigma_x1 != 0.0 && sigma_x2 != 0.0) {
        sigma_x = (2.0f * sigma_x1 * sigma_x2) / (sigma_x1 + sigma_x2);
      }

      real_cpu sigma_y1 = grid_cell->sigma.y;
      real_cpu sigma_y2 = black_neighbor_cell->sigma.y;
      real_cpu sigma_y = 0.0;
      if(sigma_y1 != 0.0 && sigma_y2 != 0.0) {
        sigma_y = (2.0f * sigma_y1 * sigma_y2) / (sigma_y1 + sigma_y2);
      }

      real_cpu sigma_z1 = grid_cell->sigma.z;
      real_cpu sigma_z2 = black_neighbor_cell->sigma.z;
      real_cpu sigma_z = 0.0;
      if(sigma_z1 != 0.0 && sigma_z2 != 0.0) {
        sigma_z = (2.0f * sigma_z1 * sigma_z2) / (sigma_z1 + sigma_z2);
      }

      // Use the finer cell's spacing when the levels differ.
      if(black_neighbor_cell->cell_data.level > grid_cell->cell_data.level) {
        dx = black_neighbor_cell->discretization.x;
        dy = black_neighbor_cell->discretization.y;
        dz = black_neighbor_cell->discretization.z;
      } else {
        dx = grid_cell->discretization.x;
        dy = grid_cell->discretization.y;
        dz = grid_cell->discretization.z;
      }

      // Insert the element into grid_cell's row unless the column is
      // already present (index 0 is the diagonal, hence i starts at 1).
      lock_cell_node(grid_cell);

      struct element *cell_elements = grid_cell->elements;
      position = black_neighbor_cell->grid_position;

      size_t max_elements = arrlen(cell_elements);
      bool insert = true;

      for(size_t i = 1; i < max_elements; i++) {
        if(cell_elements[i].column == position) {
          insert = false;
          break;
        }
      }

      if(insert) {
        struct element new_element = fill_element_ddm(position, direction,
                                                      dx, dy, dz,
                                                      sigma_x, sigma_y, sigma_z,
                                                      grid_cell->kappa.x, grid_cell->kappa.y, grid_cell->kappa.z,
                                                      dt, cell_elements);
        new_element.cell = black_neighbor_cell;
        arrput(grid_cell->elements, new_element);
      }
      unlock_cell_node(grid_cell);

      // Mirror entry in the neighbour's row (symmetric stencil).
      // NOTE(review): the mirrored entry uses grid_cell's kappa, not the
      // neighbour's — presumably intentional for the DDM scheme; confirm.
      lock_cell_node(black_neighbor_cell);

      cell_elements = black_neighbor_cell->elements;
      position = grid_cell->grid_position;

      max_elements = arrlen(cell_elements);

      insert = true;
      for(size_t i = 1; i < max_elements; i++) {
        if(cell_elements[i].column == position) {
          insert = false;
          break;
        }
      }

      if(insert) {
        struct element new_element = fill_element_ddm(position, direction,
                                                      dx, dy, dz,
                                                      sigma_x, sigma_y, sigma_z,
                                                      grid_cell->kappa.x, grid_cell->kappa.y, grid_cell->kappa.z,
                                                      dt, cell_elements);
        new_element.cell = grid_cell;
        arrput(black_neighbor_cell->elements, new_element);
      }
      unlock_cell_node(black_neighbor_cell);
    }
  }
}

// Resets every active cell's element list to a single diagonal entry
// with value ALPHA(...) (capacitive term); capacity 7 = diagonal plus
// up to six face neighbours.
void initialize_diagonal_elements(struct monodomain_solver *the_solver, struct grid *the_grid)
{
  real_cpu alpha, dx, dy, dz;
  uint32_t num_active_cells = the_grid->num_active_cells;
  struct cell_node **ac = the_grid->active_cells;

  real_cpu beta = the_solver->beta;
  real_cpu cm = the_solver->cm;
  real_cpu dt = the_solver->dt;

  int i;

  #pragma omp parallel for private(alpha, dx, dy, dz)
  for(i = 0; i < num_active_cells; i++) {
    dx = ac[i]->discretization.x;
    dy = ac[i]->discretization.y;
    dz = ac[i]->discretization.z;

    alpha = ALPHA(beta, cm, dt, dx, dy, dz);

    struct element element;
    element.column = ac[i]->grid_position;
    element.cell = ac[i];
    element.value = alpha;

    if(ac[i]->elements) arrfree(ac[i]->elements);

    ac[i]->elements = NULL;
    arrsetcap(ac[i]->elements, 7);
    arrput(ac[i]->elements, element);
  }
}

// Returns a uniform random integer in [0, n) using rejection sampling
// to avoid modulo bias. NOTE(review): rand() is not thread-safe; do not
// call from inside the parallel regions in this file.
int randRange(int n)
{
  int limit;
  int r;

  limit = RAND_MAX - (RAND_MAX % n);

  while((r = rand()) >= limit)
    ;

  return r % n;
}

// Reads the fibrotic regions from file and, for each grid cell inside a
// region, reduces its conductivity by 'sigma_factor'.
// Assembles the DDM system matrix with fibrotic regions loaded from a
// CSV file; cells inside an inactive region get sigma scaled by
// 'sigma_factor'. Parameters come from the config section; the_grid /
// the_solver / config are supplied by the ASSEMBLY_MATRIX macro.
ASSEMBLY_MATRIX (heterogenous_fibrotic_sigma_with_factor_ddm_assembly_matrix)
{
  static bool sigma_initialized = false;

  uint32_t num_active_cells = the_grid->num_active_cells;
  struct cell_node **ac = the_grid->active_cells;

  initialize_diagonal_elements(the_solver, the_grid);

  char *fib_file = NULL;
  GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(fib_file, config->config_data, "fibrosis_file");

  int fib_size = 0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int,fib_size, config->config_data, "size");

  real sigma_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x");

  real sigma_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y");

  real sigma_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z");

  real cell_length_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x");

  real cell_length_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y");

  real cell_length_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z");

  real sigma_factor = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor");

  // Calculate the kappa values on each cell of the grid
  calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z);

  // Baseline conductivities, set only on the first call (static flag).
  if(!sigma_initialized) {
    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
      ac[i]->sigma.x = sigma_x;
      ac[i]->sigma.y = sigma_y;
      ac[i]->sigma.z = sigma_z;
    }
    sigma_initialized = true;
  }

  // Read and store the fibrosis locations
  FILE *file = fopen(fib_file, "r");

  if(!file) {
    printf("Error opening file %s!!\n", fib_file);
    // NOTE(review): exit(0) signals success to the shell; should be
    // EXIT_FAILURE (same below).
    exit(0);
  }

  real_cpu **scar_mesh = (real_cpu **)malloc(sizeof(real_cpu *) * fib_size);
  for(int i = 0; i < fib_size; i++) {
    scar_mesh[i] = (real_cpu *)malloc(sizeof(real_cpu) * 7);
    if(scar_mesh[i] == NULL) {
      printf("Failed to allocate memory\n");
      exit(0);
    }
  }

  // NOTE(review): this loop only stops at EOF — it neither checks that
  // fscanf matched all 7 fields nor that i stays below fib_size; a file
  // longer than 'size' overruns scar_mesh.
  uint32_t i = 0;
  while (fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf\n",
                &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2],
                &scar_mesh[i][3], &scar_mesh[i][4], &scar_mesh[i][5],
                &scar_mesh[i][6]) != EOF) {
    i++;
  }

  fclose(file);

  uint32_t num_fibrotic_regions = i;

  // Pass through all the cells of the grid and check if its center is inside the current
  // fibrotic region.
  // NOTE(review): every thread walks the whole cell list; if regions
  // overlap, two threads may write the same cell's sigma concurrently —
  // benign only because they write the same values. Confirm intended.
  #pragma omp parallel for
  for(int j = 0; j < num_fibrotic_regions; j++) {

    struct cell_node *grid_cell = the_grid->first_cell;

    real_cpu b_center_x = scar_mesh[j][0];
    real_cpu b_center_y = scar_mesh[j][1];

    real_cpu b_h_dx = scar_mesh[j][3];
    real_cpu b_h_dy = scar_mesh[j][4];

    bool active = (bool) (scar_mesh[j][6]);

    while(grid_cell != 0) {
      if (grid_cell->active) {
        real_cpu center_x = grid_cell->center.x;
        real_cpu center_y = grid_cell->center.y;
        // NOTE(review): half_dx/half_dy are computed but never used.
        real_cpu half_dx = grid_cell->discretization.x/2.0;
        real_cpu half_dy = grid_cell->discretization.y/2.0;

        // NOTE(review): p holds the y-interval (p.x = top, p.y = bottom)
        // and q the x-interval — confusing reuse of point_3d fields.
        struct point_3d p;
        struct point_3d q;

        p.x = b_center_y + b_h_dy;
        p.y = b_center_y - b_h_dy;
        q.x = b_center_x + b_h_dx;
        q.y = b_center_x - b_h_dx;

        // Check if the current cell is inside the fibrotic region
        if (center_x > q.y && center_x < q.x && center_y > p.y && center_y < p.x) {
          if(active == 0) {
            grid_cell->sigma.x = sigma_x * sigma_factor;
            grid_cell->sigma.y = sigma_y * sigma_factor;
            grid_cell->sigma.z = sigma_z * sigma_factor;
          }
        }
      }
      grid_cell = grid_cell->next;
    }
  }

  printf("[!] Using DDM formulation\n");
  printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",
         cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x);
  printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",
         cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y);
  printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",
         cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z);

  // Build the off-diagonal entries for all six faces of every cell.
  #pragma omp parallel for
  for(int i = 0; i < num_active_cells; i++) {

    // Computes and designates the flux due to south cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->south, the_solver->dt,'s');

    // Computes and designates the flux due to north cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->north, the_solver->dt,'n');

    // Computes and designates the flux due to east cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->east, the_solver->dt,'e');

    // Computes and designates the flux due to west cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->west, the_solver->dt,'w');

    // Computes and designates the flux due to front cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->front, the_solver->dt,'f');

    // Computes and designates the flux due to back cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->back, the_solver->dt,'b');
  }

  // Release the scar-mesh buffer read from the fibrosis file.
  for(int k = 0; k < fib_size; k++) {
    free(scar_mesh[k]);
  }
  free(scar_mesh);
}

// Assembles the DDM system matrix with a single homogeneous conductivity
// taken from the config section (no fibrosis).
ASSEMBLY_MATRIX(homogenous_ddm_assembly_matrix)
{
  static bool sigma_initialized = false;

  struct cell_node *grid_cell;

  uint32_t num_active_cells = the_grid->num_active_cells;
  struct cell_node **ac = the_grid->active_cells;

  initialize_diagonal_elements(the_solver, the_grid);

  real sigma_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x");

  real sigma_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y");

  real sigma_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z");

  real cell_length_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x");

  real cell_length_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y");

  real cell_length_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z");

  // Calculate the kappa values on each cell of the grid
  calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z);

  // NOTE(review): on the first call these lines print ac[0]->sigma
  // BEFORE the conductivities are initialized below — the printed sigma
  // is stale (likely zero), unlike the heterogeneous variant above.
  printf("[!] Using DDM formulation\n");
  printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",
         cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x);
  printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",
         cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y);
  printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",
         cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z);

  // Initialize the conductivities of each cell (first call only).
  if (!sigma_initialized) {
    grid_cell = the_grid->first_cell;

    while(grid_cell != 0) {
      if(grid_cell->active) {
        grid_cell->sigma.x = sigma_x;
        grid_cell->sigma.y = sigma_y;
        grid_cell->sigma.z = sigma_z;
      }
      grid_cell = grid_cell->next;
    }
    sigma_initialized = true;
  }

  // Build the off-diagonal entries for all six faces of every cell.
  #pragma omp parallel for
  for(int i = 0; i < num_active_cells; i++) {

    // Computes and designates the flux due to south cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->south, the_solver->dt,'s');

    // Computes and designates the flux due to north cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->north, the_solver->dt,'n');

    // Computes and designates the flux due to east cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->east, the_solver->dt,'e');

    // Computes and designates the flux due to west cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->west, the_solver->dt,'w');

    // Computes and designates the flux due to front cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->front, the_solver->dt,'f');

    // Computes and designates the flux due to back cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->back, the_solver->dt,'b');
  }
}

// Assembles the DDM matrix for a test geometry: a low-conductivity
// rectangle with a channel and two triangular openings carved back to
// normal (or sigma_factor_2) conductivity. (Definition continues past
// this chunk.)
ASSEMBLY_MATRIX(sigma_low_region_triangle_ddm_tiny)
{
  uint32_t num_active_cells = the_grid->num_active_cells;
  struct cell_node **ac = the_grid->active_cells;

  initialize_diagonal_elements(the_solver, the_grid);

  int i;

  real sigma_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x");

  real sigma_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y");

  real sigma_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z");

  real cell_length_x = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x");

  real cell_length_y = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y");

  real cell_length_z = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z");

  real sigma_factor = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor");

  real sigma_factor_2 = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor_2, config->config_data, "sigma_factor_2");

  real side_length = 0.0;
  GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,side_length, config->config_data, "side_length");

  // Calculate the kappas for the DDM
  calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z);

  //~ bool inside;

  // Initialize the conductivities
  #pragma omp parallel for
  for (i = 0; i < num_active_cells; i++) {
    ac[i]->sigma.x = sigma_x;
    ac[i]->sigma.y = sigma_y;
    ac[i]->sigma.z = sigma_z;
  }

  printf("[!] Using DDM formulation\n");
  printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",
         cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x);
  printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",
         cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y);
  printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",
         cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z);

  // Region around the middle with lower sigma.
  /* Triangle construction starts here. The idea is to split the carved
   * region into 3 parts:
   *
   *   (1) a narrow bar that forms the channel,
   *   (2) a widened opening (about 5 cells tall) where the channel meets
   *       the triangles,
   *   (3) the triangular flares themselves.
   *
   * The union (1)+(2)+(3) is the region restored to normal conductivity
   * inside the low-sigma square. */

  // set the square scar mesh...
  //~ real X_left = side_length/3.0;
  //~ real X_right = (side_length/3.0+side_length/3.0);
  //~ real Y_down = side_length/3.0;
  //~ real Y_up = (side_length/3.0+side_length/3.0);

  // left
  //~ real X_left = side_length/6.0;
  //~ real X_right = (side_length/3.0+side_length/3.0);
  real X_left = side_length/12.0;
  real X_right = (side_length/6.0+side_length/6.0);
  //~ real Y_down = side_length/6.0;
  real Y_down = 0.0;
  //~ real Y_up = (side_length/6.0+side_length/6.0+side_length/12.0);
  real Y_up = (side_length/6.0+side_length/6.0+side_length/6.0);

  // General square of slow sigma (the division by 3 may change)...
  #pragma omp parallel for
  for (i = 0; i < num_active_cells; i++) {
    double x_prev = ac[i]->center.x;
    double y_prev = ac[i]->center.y;
    if ((x_prev>= X_left ) && (x_prev <= X_right) && (y_prev>= Y_down) && (y_prev<= Y_up)) {
      ac[i]->sigma.x = sigma_x * sigma_factor;
      ac[i]->sigma.y = sigma_y * sigma_factor;
      ac[i]->sigma.z = sigma_z * sigma_factor;
    }
  }

  // Now carve the channel and triangles back out of the square.
  // NOTE(review): mid_scar_Y and the Part*_x/_y bounds are declared int
  // but computed from real-valued expressions — the fractional part is
  // truncated before the real-valued comparisons below; confirm this
  // truncation is intended.
  #pragma omp parallel for
  for (i = 0; i < num_active_cells; i++) {
    double x = ac[i]->center.x;
    double y = ac[i]->center.y;

    // Part 1: the narrow horizontal channel bar.
    int mid_scar_Y = (Y_down + Y_up)/12.0;
    int Part1_x_right = X_left + 10.*(cell_length_x);
    int Part2_x_left = Part1_x_right + cell_length_x;

    if( (y<=(mid_scar_Y+(cell_length_y/2.))) && (y> (mid_scar_Y- (cell_length_y/2.)))
        && (x>= X_left) && (x<= X_right)) {
      ac[i]->sigma.x = sigma_x;
      ac[i]->sigma.y = sigma_y;
      ac[i]->sigma.z = sigma_z;
    }

    // Part 1.5: slightly taller stub at the channel entrance.
    if( (y<=(mid_scar_Y+(cell_length_y))) && (y> (mid_scar_Y- 2.0*(cell_length_y)))
        && (x>= X_left) && (x<= X_left+5.0*cell_length_x))   // changed this
    {
      ac[i]->sigma.x = sigma_x;
      ac[i]->sigma.y = sigma_y;
      ac[i]->sigma.z = sigma_z;
    }

    // Part 2: the ~5-cell-tall opening.
    int Part2_y_up = mid_scar_Y + (3.*cell_length_y);     // original
    int Part2_y_down = mid_scar_Y - (2.*cell_length_y);   // original
    //~ int Part2_y_up = mid_scar_Y + (20.*cell_length_y);   // modified
    //~ int Part2_y_down = mid_scar_Y - (20.*cell_length_y); // modified
    // maybe use the one above
    if( (x>= Part2_x_left) && (x<= X_right) && (y<= Part2_y_up ) &&(y> Part2_y_down)) {
      ac[i]->sigma.x = sigma_x;
      ac[i]->sigma.y = sigma_y;
      ac[i]->sigma.z = sigma_z;
    }

    // Part 3 up; m is the slope of the upper triangle edge.
    int Part3_x_left = Part2_x_left + cell_length_x;
    int Part3_y_up = Part2_y_up;
    real m_up = (Y_up-Part3_y_up)/(X_right-Part3_x_left);
    int Part3_x_left_2 = (Part3_x_left + X_right)/2.0;
    if((x>=Part3_x_left) && (x <= X_right) && (y <= (m_up*(x-Part3_x_left)+Part3_y_up)) && (y>= Part3_y_up) ) {
      // Right half of the flare uses the secondary factor.
      if(x>=Part3_x_left_2) {
        ac[i]->sigma.x = sigma_x*sigma_factor_2;
        ac[i]->sigma.y = sigma_y*sigma_factor_2;
        ac[i]->sigma.z = sigma_z*sigma_factor_2;
      } else {
        ac[i]->sigma.x = sigma_x;
        ac[i]->sigma.y = sigma_y;
        ac[i]->sigma.z = sigma_z;
      }
    }

    // Part 3 down; m is the slope of the lower triangle edge.
    int Part3_y_down = Part2_y_down;
    real m_down = (Y_down - Part3_y_down)/(X_right-Part3_x_left);
    if((x>=Part3_x_left) && (x <= X_right) && (y >= (m_down*(x-Part3_x_left)+ Part3_y_down)) && (y<= Part3_y_down) && (y>=Y_down)) {
      if(x>=Part3_x_left_2) {
        ac[i]->sigma.x = sigma_x*sigma_factor_2;
        ac[i]->sigma.y = sigma_y*sigma_factor_2;
        ac[i]->sigma.z = sigma_z*sigma_factor_2;
      } else {
        ac[i]->sigma.x = sigma_x;
        ac[i]->sigma.y = sigma_y;
        ac[i]->sigma.z = sigma_z;
      }
    }

    // Middle channel between the two flares.
    if ((y<=Part3_y_up) && (y>=Part3_y_down) && (x>=Part3_x_left_2) && (x<=X_right)) {
      ac[i]->sigma.x = sigma_x*sigma_factor_2;
      ac[i]->sigma.y = sigma_y*sigma_factor_2;
      ac[i]->sigma.z = sigma_z*sigma_factor_2;
    }
  }
  // Triangle construction ends here.

  // Build the off-diagonal entries for all six faces of every cell.
  #pragma omp parallel for
  for (i = 0; i < num_active_cells; i++) {

    // Computes and designates the flux due to south cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->south, the_solver->dt,'s');

    // Computes and designates the flux due to north cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->north, the_solver->dt,'n');

    // Computes and designates the flux due to east cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->east, the_solver->dt,'e');

    // Computes and designates the flux due to west cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->west, the_solver->dt,'w');

    // Computes and designates the flux due to front cells.
    fill_discretization_matrix_elements_ddm(ac[i], ac[i]->front, the_solver->dt,'f');

    // Computes and designates the flux due to back cells.
fill_discretization_matrix_elements_ddm(ac[i], ac[i]->back, the_solver->dt,'b'); } } ASSEMBLY_MATRIX(write_sigma_low_region_triangle_ddm_tiny) { uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node **ac = the_grid->active_cells; struct cell_node *grid_cell; initialize_diagonal_elements(the_solver, the_grid); int i; real sigma_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x"); real sigma_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y"); real sigma_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z"); real cell_length_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x"); real cell_length_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y"); real cell_length_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z"); real sigma_factor = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor"); real sigma_factor_2 = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor_2, config->config_data, "sigma_factor_2"); real side_length = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,side_length, config->config_data, "side_length"); char *new_fib_file = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(new_fib_file, config->config_data, "rescaled_fibrosis_file"); //~ real scar_length = 0.0; //~ GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(scar_length, config->config_data, "scar_length"); //~ calculate_kappa_elements(the_grid,cell_length_x,cell_length_y,cell_length_z); //~ calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z); //~ bool inside; #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = 
sigma_y; ac[i]->sigma.z = sigma_z; } printf("[!] Using DDM formulation\n"); printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",\ cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x); printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",\ cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y); printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",\ cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z); //regiao ao redor do meio com sigma menor// /*Aqui comeca a brincadeira de criar os triangulos * * Pensei em dividir em 3 partes a primeira uma barra * * 1 2 3 * ******* * ****** * ***** * **** * ** * * * 1 fazendo o canal * * ** * ** * ** * ** * ** * * 2 considerar a abertura de maneira que pegue 5 celulas * * * * * * * * * * * 3 pegar o triangulo * * **** * *** * ** * * * * Dai (1)+(2)+(3) eh a ahrea que desejamos * * */ // set the square scar mesh... //~ real X_left = side_length/3.0; //~ real X_right = (side_length/3.0+side_length/3.0); //~ real Y_down = side_length/3.0; //~ real Y_up = (side_length/3.0+side_length/3.0); //esquerda //~ real X_left = side_length/6.0; //~ real X_right = (side_length/3.0+side_length/3.0); real X_left = side_length/12.0; real X_right = (side_length/6.0+side_length/6.0); //~ real Y_down = side_length/6.0; real Y_down = 0.0; //~ real Y_up = (side_length/6.0+side_length/6.0+side_length/12.0); real Y_up = (side_length/6.0+side_length/6.0+side_length/6.0); //General square slow sigma *aquela divisao por 3 pode mudar*... 
#pragma omp parallel for for (i = 0; i < num_active_cells; i++) { double x_prev = ac[i]->center.x; double y_prev = ac[i]->center.y; if ((x_prev>= X_left ) && (x_prev <= X_right) && (y_prev>= Y_down) && (y_prev<= Y_up)) { ac[i]->sigma.x = sigma_x * sigma_factor; ac[i]->sigma.y = sigma_y * sigma_factor; ac[i]->sigma.z = sigma_z * sigma_factor; } } //Vou recuparando os triangulos do square #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { double x = ac[i]->center.x; double y = ac[i]->center.y; //Part 1 int mid_scar_Y = (Y_down + Y_up)/12.0; int Part1_x_right = X_left + 10.*(cell_length_x); int Part2_x_left = Part1_x_right + cell_length_x; if( (y<=(mid_scar_Y+(cell_length_y/2.))) && (y> (mid_scar_Y- (cell_length_y/2.))) && (x>= X_left) && (x<= X_right)) { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } //Part 1.5 if( (y<=(mid_scar_Y+(cell_length_y))) && (y> (mid_scar_Y- 2.0*(cell_length_y))) && (x>= X_left) && (x<= X_left+5.0*cell_length_x)) //isto mudei { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } //~ //Part 2 int Part2_y_up = mid_scar_Y + (3.*cell_length_y); //original int Part2_y_down = mid_scar_Y - (2.*cell_length_y); //original //~ int Part2_y_up = mid_scar_Y + (20.*cell_length_y);//modifiquei //~ int Part2_y_down = mid_scar_Y - (20.*cell_length_y);//modifiquei //talvez colocar o de cima if( (x>= Part2_x_left) && (x<= X_right) && (y<= Part2_y_up ) &&(y> Part2_y_down)) { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } //~ //Part 3 up, m coeficiente angular int Part3_x_left = Part2_x_left + cell_length_x; int Part3_y_up = Part2_y_up; real m_up = (Y_up-Part3_y_up)/(X_right-Part3_x_left); int Part3_x_left_2 = (Part3_x_left + X_right)/2.0; if((x>=Part3_x_left) && (x <= X_right) && (y <= (m_up*(x-Part3_x_left)+Part3_y_up)) && (y>= Part3_y_up) ) { if(x>=Part3_x_left_2) { ac[i]->sigma.x = sigma_x*sigma_factor_2; ac[i]->sigma.y = sigma_y*sigma_factor_2; 
ac[i]->sigma.z = sigma_z*sigma_factor_2; } else { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } } //~ //Part 3 down, m coeficiente angular int Part3_y_down = Part2_y_down; real m_down = (Y_down - Part3_y_down)/(X_right-Part3_x_left); if((x>=Part3_x_left) && (x <= X_right) && (y >= (m_down*(x-Part3_x_left)+ Part3_y_down)) && (y<= Part3_y_down) && (y>=Y_down)) { if(x>=Part3_x_left_2) { ac[i]->sigma.x = sigma_x*sigma_factor_2; ac[i]->sigma.y = sigma_y*sigma_factor_2; ac[i]->sigma.z = sigma_z*sigma_factor_2; } else { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } } //middle canal if ((y<=Part3_y_up) && (y>=Part3_y_down) && (x>=Part3_x_left_2) && (x<=X_right)) { ac[i]->sigma.x = sigma_x*sigma_factor_2; ac[i]->sigma.y = sigma_y*sigma_factor_2; ac[i]->sigma.z = sigma_z*sigma_factor_2; } } // Write the new grid configuration on the rescaled_fibrosis file FILE *fileW = fopen(new_fib_file, "w+"); grid_cell = the_grid->first_cell; while(grid_cell != 0) { if(grid_cell->active) { // We reescale the cell position using the 'rescale_factor' double center_x = grid_cell->center.x ; double center_y = grid_cell->center.y ; double center_z = grid_cell->center.z ; double dx = grid_cell->discretization.x; double dy = grid_cell->discretization.y; double dz = grid_cell->discretization.z; double w_sigma_x = grid_cell->sigma.x; double w_sigma_y = grid_cell->sigma.y; double w_sigma_z = grid_cell->sigma.z; // Then, we write only the fibrotic regions to the output file fprintf(fileW,"%g,%g,%g,%g,%g,%g,%g,%g,%g\n",center_x,center_y,center_z,dx/2.0,dy/2.0,dz/2.0,w_sigma_x,w_sigma_y,w_sigma_z); } grid_cell = grid_cell->next; } fclose(fileW); // We just leave the program after this ... print_to_stdout_and_file("[!] 
Finish writing fibrotic region file '%s'!\n",new_fib_file); exit(EXIT_SUCCESS); } ASSEMBLY_MATRIX (heterogenous_fibrotic_sigma_with_factor_ddm_assembly_matrix_add_sigma_reverse) { static bool sigma_initialized = false; uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node **ac = the_grid->active_cells; initialize_diagonal_elements(the_solver, the_grid); char *fib_file = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(fib_file, config->config_data, "fibrosis_file"); int fib_size = 0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int,fib_size, config->config_data, "size"); real sigma_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x"); real sigma_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y"); real sigma_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z"); real cell_length_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x"); real cell_length_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y"); real cell_length_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z"); real sigma_factor = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor"); // Calculate the kappa values on each cell of th grid calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z); if(!sigma_initialized) { #pragma omp parallel for for (uint32_t i = 0; i < num_active_cells; i++) { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } sigma_initialized = true; } // Read and store the fibrosis locations FILE *file = fopen(fib_file, "r"); if(!file) { printf("Error opening file %s!!\n", fib_file); exit(0); } real_cpu **scar_mesh = (real_cpu **)malloc(sizeof(real_cpu *) 
* fib_size); for(int i = 0; i < fib_size; i++) { scar_mesh[i] = (real_cpu *)malloc(sizeof(real_cpu) * 9); if(scar_mesh[i] == NULL) { printf("Failed to allocate memory\n"); exit(0); } } uint32_t i = 0; while (fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &scar_mesh[i][3],\ &scar_mesh[i][4], &scar_mesh[i][5], &scar_mesh[i][6], &scar_mesh[i][7],\ &scar_mesh[i][8]) != EOF) { i++; } fclose(file); uint32_t num_fibrotic_regions = i; // Pass through all the cells of the grid and check if its center is inside the current // fibrotic region struct cell_node *grid_cell = the_grid->first_cell; real aux_sigma_load_x; real aux_sigma_load_y; real aux_sigma_load_z; while(grid_cell != 0) { if (grid_cell->active) { real_cpu center_x = grid_cell->center.x; real_cpu center_y = grid_cell->center.y; real_cpu half_dx = grid_cell->discretization.x/2.0; real_cpu half_dy = grid_cell->discretization.y/2.0; aux_sigma_load_x = grid_cell->sigma.x; aux_sigma_load_y = grid_cell->sigma.y; aux_sigma_load_z = grid_cell->sigma.z; struct point_3d p; struct point_3d q; p.x = center_y + half_dy; p.y = center_y - half_dy; q.x = center_x + half_dx; q.y = center_x - half_dx; for (int j = 0; j < num_fibrotic_regions; j++) { real_cpu b_center_x = scar_mesh[j][0]; real_cpu b_center_y = scar_mesh[j][1]; real_cpu b_sigma_x = scar_mesh[j][6]; real_cpu b_sigma_y = scar_mesh[j][7]; real_cpu b_sigma_z = scar_mesh[j][8]; if (b_center_x > q.y && b_center_x < q.x && b_center_y > p.y && b_center_y < p.x) { aux_sigma_load_x = b_sigma_x; aux_sigma_load_y = b_sigma_y; aux_sigma_load_z = b_sigma_z; //armazenar os valores dos sigmas ao redor e utilizar a meia harmonica para resolvers } } //~ grid_cell->sigma.x = 2.0*((sigma_x * aux_sigma_load_x)/(sigma_x + aux_sigma_load_x)); //~ grid_cell->sigma.y = 2.0*((sigma_y * aux_sigma_load_y)/(sigma_y + aux_sigma_load_y)); //~ grid_cell->sigma.z = 2.0*((sigma_z * aux_sigma_load_z)/(sigma_z + aux_sigma_load_z)); 
grid_cell->sigma.x = aux_sigma_load_x; grid_cell->sigma.y = aux_sigma_load_y; grid_cell->sigma.z = aux_sigma_load_z; } grid_cell = grid_cell->next; } printf("[!] Using DDM formulation\n"); printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",\ cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x); printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",\ cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y); printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",\ cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z); #pragma omp parallel for for(int i = 0; i < num_active_cells; i++) { // Computes and designates the flux due to south cells. fill_discretization_matrix_elements_ddm(ac[i], ac[i]->south, the_solver->dt,'s'); // Computes and designates the flux due to north cells. fill_discretization_matrix_elements_ddm(ac[i], ac[i]->north, the_solver->dt,'n'); // Computes and designates the flux due to east cells. fill_discretization_matrix_elements_ddm(ac[i], ac[i]->east, the_solver->dt,'e'); // Computes and designates the flux due to west cells. fill_discretization_matrix_elements_ddm(ac[i], ac[i]->west, the_solver->dt,'w'); // Computes and designates the flux due to front cells. fill_discretization_matrix_elements_ddm(ac[i], ac[i]->front, the_solver->dt,'f'); // Computes and designates the flux due to back cells. 
fill_discretization_matrix_elements_ddm(ac[i], ac[i]->back, the_solver->dt,'b'); } for(int k = 0; k < fib_size; k++) { free(scar_mesh[k]); } free(scar_mesh); } ASSEMBLY_MATRIX(heterogenous_fibrotic_region_file_write_using_seed) { static bool sigma_initialized = false; int num; uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node **ac = the_grid->active_cells; struct cell_node *grid_cell; initialize_diagonal_elements(the_solver, the_grid); real sigma_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x"); real sigma_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y"); real sigma_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z"); real sigma_factor = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor"); real sigma_factor_2 = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor_2, config->config_data, "sigma_factor_2"); real_cpu phi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,phi, config->config_data, "phi"); real_cpu phi_2 = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,phi_2, config->config_data, "phi_2"); unsigned seed = 0; GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(unsigned,seed, config->config_data, "seed"); char *new_fib_file = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(new_fib_file, config->config_data, "rescaled_fibrosis_file"); real rescale_factor = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,rescale_factor, config->config_data, "rescale_factor"); real x_shift = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,x_shift, config->config_data, "x_shift"); real y_shift = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,y_shift, config->config_data, "y_shift"); // Write the new fibrotic region file FILE *fileW = fopen(new_fib_file, "w+"); grid_cell = the_grid->first_cell; // Initialize the random the 
generator with the same seed used by the original model srand(seed); while(grid_cell != 0) { if(grid_cell->active) { real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) { // We reescale the cell position using the 'rescale_factor' grid_cell->sigma.x = sigma_x * sigma_factor_2; grid_cell->sigma.y = sigma_y * sigma_factor_2; grid_cell->sigma.z = sigma_z * sigma_factor_2; // Then, we write only the fibrotic regions to the output file } else { if((p>=phi)&&(p<=phi_2)) { grid_cell->sigma.x = sigma_x * sigma_factor; grid_cell->sigma.y = sigma_y * sigma_factor; grid_cell->sigma.z = sigma_z * sigma_factor; } else { grid_cell->sigma.x = sigma_x; grid_cell->sigma.y = sigma_y; grid_cell->sigma.z = sigma_z; } } double center_x = grid_cell->center.x; double center_y = grid_cell->center.y; double center_z = grid_cell->center.z; double dx = grid_cell->discretization.x ; double dy = grid_cell->discretization.y ; double dz = grid_cell->discretization.z ; center_x = center_x + x_shift; center_y = center_y + y_shift; fprintf(fileW,"%g,%g,%g,%g,%g,%g,%g,%g,%g\n",center_x,center_y,center_z,dx/2.0,dy/2.0,dz/2.0,grid_cell->sigma.x,grid_cell->sigma.y,grid_cell->sigma.z); } grid_cell = grid_cell->next; } fclose(fileW); // We just leave the program after this ... print_to_stdout_and_file("[!] 
Finish writing fibrotic region file '%s'!\n",new_fib_file); exit(EXIT_SUCCESS); } ASSEMBLY_MATRIX(sigma_low_region_triangle_ddm_tiny_random_write) { uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node **ac = the_grid->active_cells; initialize_diagonal_elements(the_solver, the_grid); int i; char *fib_file_1 = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(fib_file_1, config->config_data, "fibrosis_file_1"); char *fib_file_2 = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(fib_file_2, config->config_data, "fibrosis_file_2"); char *fib_file_3 = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(fib_file_3, config->config_data, "fibrosis_file_3"); char *new_fib_file = NULL; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(new_fib_file, config->config_data, "new_fib_file"); int fib_size = 0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int,fib_size, config->config_data, "size"); real sigma_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_x, config->config_data, "sigma_x"); real sigma_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_y, config->config_data, "sigma_y"); real sigma_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_z, config->config_data, "sigma_z"); real cell_length_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_x, config->config_data, "cell_length_x"); real cell_length_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_y, config->config_data, "cell_length_y"); real cell_length_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,cell_length_z, config->config_data, "cell_length_z"); real sigma_factor = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor, config->config_data, "sigma_factor"); real sigma_factor_2 = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,sigma_factor_2, config->config_data, "sigma_factor_2"); real side_length = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,side_length, config->config_data, "side_length"); 
calculate_kappa_elements(the_solver,the_grid,cell_length_x,cell_length_y,cell_length_z); #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { ac[i]->sigma.x = sigma_x; ac[i]->sigma.y = sigma_y; ac[i]->sigma.z = sigma_z; } printf("[!] Using DDM formulation\n"); printf("[X] Cell length = %.10lf || sigma_x = %.10lf || dx = %.10lf || kappa_x = %.10lf\n",\ cell_length_x,ac[0]->sigma.x,ac[0]->discretization.x,ac[0]->kappa.x); printf("[Y] Cell length = %.10lf || sigma_y = %.10lf || dy = %.10lf || kappa_y = %.10lf\n",\ cell_length_y,ac[0]->sigma.y,ac[0]->discretization.y,ac[0]->kappa.y); printf("[Z] Cell length = %.10lf || sigma_z = %.10lf || dz = %.10lf || kappa_z = %.10lf\n",\ cell_length_z,ac[0]->sigma.z,ac[0]->discretization.z,ac[0]->kappa.z); // Allocate memory to store the grid configuration real_cpu **scar_mesh = (real_cpu **)malloc(sizeof(real_cpu *) * fib_size); for(int i = 0; i < fib_size; i++) { scar_mesh[i] = (real_cpu *)malloc(sizeof(real_cpu) * 9); if(scar_mesh[i] == NULL) { printf("Failed to allocate memory\n"); exit(0); } } FILE *file; i = 0; // Read the FIRST fibrosis file file = fopen(fib_file_1, "r"); if(!file) { printf("Error opening file %s!!\n", fib_file_1); exit(0); } while (fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &scar_mesh[i][3],\ &scar_mesh[i][4], &scar_mesh[i][5], &scar_mesh[i][6], &scar_mesh[i][7],\ &scar_mesh[i][8]) != EOF) { i++; } fclose(file); // Read the SECOND fibrosis file file = fopen(fib_file_2, "r"); if(!file) { printf("Error opening file %s!!\n", fib_file_2); exit(0); } while (fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &scar_mesh[i][3],\ &scar_mesh[i][4], &scar_mesh[i][5], &scar_mesh[i][6], &scar_mesh[i][7],\ &scar_mesh[i][8]) != EOF) { i++; } fclose(file); // Read the THIRD fibrosis file file = fopen(fib_file_3, "r"); if(!file) { printf("Error opening file %s!!\n", fib_file_3); exit(0); } 
while (fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &scar_mesh[i][3],\ &scar_mesh[i][4], &scar_mesh[i][5], &scar_mesh[i][6], &scar_mesh[i][7],\ &scar_mesh[i][8]) != EOF) { i++; } fclose(file); uint32_t num_fibrotic_regions = i; // Update the cells that are inside of the scar regions #pragma omp parallel for for(int j = 0; j < num_fibrotic_regions; j++) { struct cell_node *grid_cell = the_grid->first_cell; real_cpu b_center_x = scar_mesh[j][0]; real_cpu b_center_y = scar_mesh[j][1]; real_cpu b_h_dx = scar_mesh[j][3]; real_cpu b_h_dy = scar_mesh[j][4]; real aux_sigma_load_x = scar_mesh[j][6]; real aux_sigma_load_y = scar_mesh[j][7]; while(grid_cell != 0) { if (grid_cell->active) { real_cpu center_x = grid_cell->center.x; real_cpu center_y = grid_cell->center.y; real_cpu half_dy = grid_cell->discretization.y/2.0; real_cpu half_dx = grid_cell->discretization.x/2.0; struct point_3d p; struct point_3d q; p.x = b_center_y + b_h_dy; p.y = b_center_y - b_h_dy; q.x = b_center_x + b_h_dx; q.y = b_center_x - b_h_dx; // Check if the current cell is inside the fibrotic region if (center_x > q.y && center_x < q.x && center_y > p.y && center_y < p.x) { grid_cell->sigma.x = aux_sigma_load_x; grid_cell->sigma.y = aux_sigma_load_y; } } grid_cell = grid_cell->next; } } // Until here, we have a grid with both the column and horizontal regions ... // Start building the channel block real X_left = side_length/12.0; real X_left_1 = X_left + side_length/12.0; real X_right = 3000; real Y_down = 0.0; real Y_up = 1200; #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { double x = ac[i]->center.x; double y = ac[i]->center.y; // Region 1 starts here ... 
//Part 1 - (Channel construction) int mid_scar_Y = (Y_down + 7500)/12.0; int X_right_1 = X_left + cell_length_x; int Part1_x_right = X_left + 10.*(cell_length_x); int Part2_x_left = Part1_x_right + cell_length_x; // Decrease the conductivity of every cell inside the region create_sigma_low_block(ac[i],X_left,Part1_x_right,Y_down,Y_up,sigma_x,sigma_y,sigma_z,sigma_factor); // Create the little channel create_sigma_low_block(ac[i],X_left,Part1_x_right,mid_scar_Y - cell_length_y/2.,mid_scar_Y+(cell_length_y/4.), sigma_x,sigma_y,sigma_z,1.0 ); create_sigma_low_block(ac[i],X_left,X_left+2.0*cell_length_x,mid_scar_Y- 3.0*(cell_length_y),mid_scar_Y, sigma_x,sigma_y,sigma_z,1.0); //Part 2 - (Channel opening) int Part2_y_up = mid_scar_Y + (3.*cell_length_y); //original int Part2_y_down = mid_scar_Y - (4.*cell_length_y); //original int X_left_2 = X_left+5.0*cell_length_x; int X_right_2 = X_left+10.0*cell_length_x; create_sigma_low_block(ac[i], X_left_2,X_right_2,Part2_y_down,Part2_y_up, sigma_x,sigma_y,sigma_z,1.0); //Part 3 - triangle opening // First step int X_left_3 = X_left_2 + cell_length_x; int X_right_3 = X_right_2; int Y_up_3 = Part2_y_up + 5*cell_length_y; int Y_down_3 = Part2_y_down - 5*cell_length_y; real_cpu m_up = (Y_up_3-Part2_y_up)/(X_right_3-X_left_3); create_sigma_low_block(ac[i], X_left_3,X_right_3,Y_down_3,Y_up_3,sigma_x,sigma_y,sigma_z,1.0); // Second step int X_left_4 = X_left_3 + cell_length_x; int X_right_4 = X_right_3; int Y_up_4 = Y_up_3 + 5*cell_length_y; int Y_down_4 = Y_down_3 - 5*cell_length_y; create_sigma_low_block(ac[i], X_left_4,X_right_4,Y_down_4,Y_up_4,sigma_x,sigma_y,sigma_z,1.0); // Third step int X_left_5 = X_left_4 + cell_length_x; int X_right_5 = X_right_4; int Y_up_5 = Y_up_4 + 5*cell_length_y; int Y_down_5 = Y_down_4 - 5*cell_length_y; create_sigma_low_block(ac[i], X_left_5,X_right_5,Y_down_5,Y_up_5,sigma_x,sigma_y,sigma_z,1.0); // Fourth step int X_left_6 = X_left_5 + cell_length_x; int X_right_6 = X_right_5; int Y_up_6 = Y_up_5 + 
5*cell_length_y; int Y_down_6 = Y_down_5 - 5*cell_length_y; create_sigma_low_block(ac[i], X_left_6,X_right_6,Y_down_6,Y_up_6,sigma_x,sigma_y,sigma_z,1.0); // Region 2 starts here ... // The horizontal barrier int X_left_7 = 1900; int X_right_7 = 3900; int Y_down_7 = 1500; int Y_up_7 = 1900; create_sigma_low_block(ac[i], X_left_7,X_right_7,Y_down_7,Y_up_7,sigma_x,sigma_y,sigma_z,sigma_factor_2); // The vertical barrier int X_left_8 = 3500; int X_right_8 = 3900; int Y_down_8 = 0; int Y_up_8 = 1700; create_sigma_low_block(ac[i], X_left_8,X_right_8,Y_down_8,Y_up_8,sigma_x,sigma_y,sigma_z,sigma_factor_2); // Region 3 starts here ... // First upper block int X_left_9 = 1900; int X_right_9 = 2300; int Y_down_9 = 7900; int Y_up_9 = 8300; create_sigma_low_block(ac[i], X_left_9,X_right_9,Y_down_9,Y_up_9,sigma_x,sigma_y,sigma_z,sigma_factor_2); // Second upper block int X_left_9_1 = 2600; int X_right_9_1 = 3000; int Y_down_9_1 = 7900; int Y_up_9_1 = 8300; create_sigma_low_block(ac[i], X_left_9_1,X_right_9_1,Y_down_9_1,Y_up_9_1,sigma_x,sigma_y,sigma_z,sigma_factor_2); // Third upper block (reset) int X_left_10 = 2600; int X_right_10 = 3000; int Y_down_10 = 8600; int Y_up_10 = 9000; create_sigma_low_block(ac[i], X_left_10,X_right_10,Y_down_10,Y_up_10,sigma_x,sigma_y,sigma_z,1.0); // Fourth upper block int X_left_11 = 3800; int X_right_11 = 4200; int Y_down_11 = 8200; int Y_up_11 = 8600; create_sigma_low_block(ac[i], X_left_11,X_right_11,Y_down_11,Y_up_11,sigma_x,sigma_y,sigma_z,sigma_factor_2); } // Write the grid configuration to an output file FILE *fileW = fopen(new_fib_file, "w+"); struct cell_node *grid_cell = the_grid->first_cell; // Initialize the random the generator with the same seed used by the original model while(grid_cell != 0) { if(grid_cell->active) { double center_x = grid_cell->center.x ; double center_y = grid_cell->center.y ; double center_z = grid_cell->center.z ; double dx = grid_cell->discretization.x; double dy = grid_cell->discretization.y; double dz = 
grid_cell->discretization.z; double w_sigma_x = grid_cell->sigma.x; double w_sigma_y = grid_cell->sigma.y; double w_sigma_z = grid_cell->sigma.z; fprintf(fileW,"%g,%g,%g,%g,%g,%g,%g,%g,%g\n",center_x,center_y,center_z,dx/2.0,dy/2.0,dz/2.0,w_sigma_x,w_sigma_y,w_sigma_z); } grid_cell = grid_cell->next; } fclose(fileW); // We just leave the program after this ... print_to_stdout_and_file("[!] Finish writing fibrotic region file '%s'!\n",new_fib_file); exit(EXIT_SUCCESS); }
GB_binop__rminus_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_01__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_fp64)
// A*D function (colscale):         GB (_AxD__rminus_fp64)
// D*A function (rowscale):         GB (_DxB__rminus_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_fp64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_fp64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_fp64)
// C=scalar+B                       GB (_bind1st__rminus_fp64)
// C=scalar+B'                      GB (_bind1st_tran__rminus_fp64)
// C=A+scalar                       GB (_bind2nd__rminus_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_fp64)

// C type:   double
// A type:   double
// B,b type: double

// BinaryOp: cij = (bij - aij)
// NOTE: "rminus" is reverse-minus: z = f(x,y) = y - x, as hard-coded by
// GB_BINOP below; the bind1st/bind2nd kernels apply the same orientation.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): every kernel below is generator boilerplate whose numeric body
// lives in a template file pulled in via #include; only the operator macros
// defined above specialize it for z = rminus(x,y) = y - x on double.
// This one (ewise3_accum) is emitted without a GB_DISABLE guard and returns
// void, unlike its siblings.
void GB (_Cdense_ewise3_accum__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; harmless
    // generator boilerplate, intentionally left as-is (auto-generated file).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for rminus, so this branch is the one compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap pattern of B
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        // cij = rminus (x, bij) = bij - x
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap pattern of A
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        // cij = rminus (aij, y) = y - aij
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
memory_simd_avx.c
#include <immintrin.h>
#include <stdio.h>

// Convert interleaved RGBRGBRGB... to planar RRR..., GGG..., BBB...
//
// Input:  two YMM registers holding 16 pixels (48 bytes) of interleaved RGB:
//         all 32 bytes of the first register plus the LOW 16 bytes of the
//         second register.  The upper lane of the second register is never
//         read (every blend mask is zero there, and the final permute4x64
//         steps select only quadwords 0 and 1 of the intermediates).
// Output: three YMM registers holding the 16 R, 16 G and 16 B samples,
//         unpacked from uint8 to uint16.
static __attribute__((always_inline)) inline void GatherRGBx8_avx(
    const __m256i gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,
    const __m256i bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA,
    __m256i *rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
    __m256i *gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
    __m256i *bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0)
{
    // shuffle mask to group both lanes and order the groups as described
    const __m256i brg_gbr_shuffle_mask =
        _mm256_set_epi8(13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 0, /**/
                        13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 0);
    const __m256i grb_shuffle_mask =
        _mm256_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /**/
                        14, 11, 8, 5, 2, 13, 10, 7, 4, 1, 15, 12, 9, 6, 3, 0);
    // blendv selects by the high bit of each mask byte; upper lanes are all 0
    const __m256i low_blend_mask =
        _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /**/
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128);
    const __m256i middle_blend_mask =
        _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /**/
                        0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0);
    const __m256i high_blend_mask =
        _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /**/
                        128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    const __m256i zero_vec = _mm256_setzero_si256();

    // lane split between g4 and g5
    __m256i b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0 =
        _mm256_shuffle_epi8(
            gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,
            brg_gbr_shuffle_mask);
    //swap lanes
    __m256i g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5 =
        _mm256_permute2x128_si256(
            b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
            b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
            1);
    __m256i g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0 =
        _mm256_blendv_epi8(
            g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5,
            b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
            low_blend_mask);
    // upper lane is not relevant
    __m256i rF_rE_rD_rC_rB_gF_gE_gD_gC_gB_bF_bE_bD_bC_bB_bA =
        _mm256_shuffle_epi8(bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA, brg_gbr_shuffle_mask);
    __m256i r_8bit = _mm256_blendv_epi8(
        g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0, /* */
        rF_rE_rD_rC_rB_gF_gE_gD_gC_gB_bF_bE_bD_bC_bB_bA,
        high_blend_mask);

    // Blue
    __m256i g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0 =
        _mm256_blendv_epi8(
            g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
            b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
            middle_blend_mask);
    __m256i b_8bit = _mm256_alignr_epi8( /* */
        rF_rE_rD_rC_rB_gF_gE_gD_gC_gB_bF_bE_bD_bC_bB_bA,
        g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
        6);

    // Green
    __m256i xx_xx_xx_xx_xx_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0 = _mm256_alignr_epi8(
        g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0_b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5,
        b9_b8_b7_b6_b5_rA_r9_r8_r7_r6_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0_b4_b3_b2_b1_b0_r5_r4_r3_r2_r1_r0,
        11);
    __m256i gF_gE_gD_gC_gB_rF_rE_rD_rC_rB_bF_bE_bD_bC_bB_bA =
        _mm256_shuffle_epi8(bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA, grb_shuffle_mask);
    __m256i g_8bit = _mm256_blendv_epi8(
        xx_xx_xx_xx_xx_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
        gF_gE_gD_gC_gB_rF_rE_rD_rC_rB_bF_bE_bD_bC_bB_bA,
        high_blend_mask);

    // Move high part of low lane to low part of high lane.
    __m256i xx_xx_xx_xx_xx_xx_xx_xx_rF_rE_rD_rC_rB_rA_r9_r8_xx_xx_xx_xx_xx_xx_xx_xx_r7_r6_r5_r4_r3_r2_r1_r0 =
        _mm256_permute4x64_epi64(r_8bit, _MM_SHUFFLE(0, 1, 0, 0));
    __m256i xx_xx_xx_xx_xx_xx_xx_xx_gF_gE_gD_gC_gB_gA_g9_g8_xx_xx_xx_xx_xx_xx_xx_xx_g7_g6_g5_g4_g3_g2_g1_g0 =
        _mm256_permute4x64_epi64(g_8bit, _MM_SHUFFLE(0, 1, 0, 0));
    __m256i xx_xx_xx_xx_xx_xx_xx_xx_bF_bE_bD_bC_bB_bA_b9_b8_xx_xx_xx_xx_xx_xx_xx_xx_b7_b6_b5_b4_b3_b2_b1_b0 =
        _mm256_permute4x64_epi64(b_8bit, _MM_SHUFFLE(0, 1, 0, 0));

    // Unpack uint8 elements to uint16 elements.
    // _mm256_cvtepu8_epi16 can't be used in replacement for _mm_cvtepu8_epi16,
    // because it converts __m128i to __m256i; we have __m256i as input.
    *rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0 = _mm256_unpacklo_epi8(
        xx_xx_xx_xx_xx_xx_xx_xx_rF_rE_rD_rC_rB_rA_r9_r8_xx_xx_xx_xx_xx_xx_xx_xx_r7_r6_r5_r4_r3_r2_r1_r0,
        zero_vec);
    *gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0 = _mm256_unpacklo_epi8(
        xx_xx_xx_xx_xx_xx_xx_xx_gF_gE_gD_gC_gB_gA_g9_g8_xx_xx_xx_xx_xx_xx_xx_xx_g7_g6_g5_g4_g3_g2_g1_g0,
        zero_vec);
    *bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0 = _mm256_unpacklo_epi8(
        xx_xx_xx_xx_xx_xx_xx_xx_bF_bE_bD_bC_bB_bA_b9_b8_xx_xx_xx_xx_xx_xx_xx_xx_b7_b6_b5_b4_b3_b2_b1_b0,
        zero_vec);
}

// Calculate 16 grayscale (luma) elements from 16 RGB elements (uint16 lanes).
static __attribute__((always_inline)) inline __m256i
Rgb2Yx8_avx(__m256i rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
            __m256i gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
            __m256i bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0)
{
    // Each BT.709 coefficient is scaled by 2^15 and truncated to int16
    // (the +0.5 implements round-to-nearest before the truncating cast).
    const __m256i r_coef = _mm256_set1_epi16((short)(6966.9768));  //0.2126 * 32768.0 + 0.5 | 16 coefficients - R scale factor.
    const __m256i g_coef = _mm256_set1_epi16((short)(23436.1736)); //0.7152 * 32768.0 + 0.5 | 16 coefficients - G scale factor.
    const __m256i b_coef = _mm256_set1_epi16((short)(2366.3496));  //0.0722 * 32768.0 + 0.5 | 16 coefficients - B scale factor.

    //Multiply input elements by 64 for improved accuracy.
    rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0 = _mm256_slli_epi16(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0, 6);
    gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0 = _mm256_slli_epi16(gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0, 6);
    bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0 = _mm256_slli_epi16(bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0, 6);

    //Use the special intrinsic _mm256_mulhrs_epi16 that calculates round(r*r_coef/2^15).
    //Calculate Y = 0.2126*R + 0.7152*G + 0.0722*B (BT.709, fixed point computations).
    __m256i y7_y6_y5_y4_y3_y2_y1_y0 = _mm256_add_epi16(_mm256_add_epi16(
        _mm256_mulhrs_epi16(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0, r_coef),
        _mm256_mulhrs_epi16(gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0, g_coef)),
        _mm256_mulhrs_epi16(bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0, b_coef));

    // undo the *64 pre-scaling
    return _mm256_srli_epi16(y7_y6_y5_y4_y3_y2_y1_y0, 6);
}

// Convert an interleaved 8-bit RGB image (img, channels >= 3 assumed by the
// pixel_index arithmetic) into an 8-bit grayscale image (result) using AVX2,
// splitting the work across `threads` OpenMP threads.  Pixels that do not
// fill a whole 32-pixel SIMD batch are finished by a scalar tail loop.
void convert_memory_simd_avx(unsigned char *img, int width, int height, int channels, int threads, unsigned char *result)
{
    // double accumulator, each doing 16, double of what sse can do (8)
    const int pixel_per_iteration = 32;
    const int size = width * height;
    const int pixel_per_thread_unaligned = size / threads;
    // Each SIMD pass handles a fixed batch, so each thread gets a worksize that
    // is a multiple of it.  Leftover pixels are handled separately below.
    const int pixel_per_thread_aligned = ((int)pixel_per_thread_unaligned / pixel_per_iteration) * pixel_per_iteration;
#pragma omp parallel for
    for (int thread = 0; thread < threads; thread++) {
        int end;
        if (thread + 1 == threads) {
            // last thread also covers the full batches left over by the rounding above
            end = ((int)size / pixel_per_iteration) * pixel_per_iteration;
        } else {
            end = pixel_per_thread_aligned * (thread + 1);
        }
        __m256i rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0;
        __m256i gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0;
        __m256i bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0;
        for (int i = pixel_per_thread_aligned * thread; i < end; i += pixel_per_iteration) {
            const int pixel_index = i * channels;
            // BUGFIX: use unaligned loads/stores throughout -- this interface
            // never guarantees 32-byte alignment of img or result, and the
            // aligned variants fault on misaligned addresses.
            __m256i gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0 =
                _mm256_loadu_si256((const __m256i *)&img[pixel_index]);
            // BUGFIX: only the low 16 bytes of this register are consumed by
            // GatherRGBx8_avx, so load exactly 16 bytes.  The previous 32-byte
            // load at +80 below read 16 bytes past the end of img on the final
            // batch of the image.
            __m256i bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA =
                _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)&img[pixel_index + 32]));
            //Separate RGB, and put together R elements, G elements and B elements.
            //Result is also unpacked from uint8 to uint16 elements.
            GatherRGBx8_avx(gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,
                            bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA,
                            &rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
                            &gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
                            &bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);
            //Calculate 16 Y elements.
            __m256i y7_y6_y5_y4_y3_y2_y1_y0 =
                Rgb2Yx8_avx(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
                            gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
                            bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);
            //Calculate another 16 elements so one 32-byte store can write the
            //whole batch.  These loads start 48 bytes (16 pixels) further in.
            gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0 =
                _mm256_loadu_si256((const __m256i *)&img[pixel_index + 48]);
            // skip 16*3+32=80 bytes; 16-byte load (see BUGFIX note above)
            bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA =
                _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)&img[pixel_index + 80]));
            GatherRGBx8_avx(gA_rA_b9_g9_r9_b8_g8_r8_b7_g7_r7_b6_g6_r6_b5_g5_r5_b4_g4_r4_b3_g3_r3_b2_g2_r2_b1_g1_r1_b0_g0_r0,
                            bF_gF_rF_bE_gE_rE_bD_gD_rD_bC_gC_rC_bB_gB_rB_bA,
                            &rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
                            &gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
                            &bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);
            __m256i yF_yE_yD_yC_yB_yA_y9_y8 =
                Rgb2Yx8_avx(rF_rE_rD_rC_rB_rA_r9_r8_r7_r6_r5_r4_r3_r2_r1_r0,
                            gF_gE_gD_gC_gB_gA_g9_g8_g7_g6_g5_g4_g3_g2_g1_g0,
                            bF_bE_bD_bC_bB_bA_b9_b8_b7_b6_b5_b4_b3_b2_b1_b0);
            // packus interleaves per lane; permute4x64 restores linear order
            __m256i yF_yE_yD_yC_y7_y6_y5_y4_yB_yA_y9_y8_y3_y2_y1_y0 =
                _mm256_packus_epi16(y7_y6_y5_y4_y3_y2_y1_y0, yF_yE_yD_yC_yB_yA_y9_y8);
            __m256i yF_yE_yD_yC_yB_yA_y9_y8_y7_y6_y5_y4_y3_y2_y1_y0 =
                _mm256_permute4x64_epi64(yF_yE_yD_yC_y7_y6_y5_y4_yB_yA_y9_y8_y3_y2_y1_y0, _MM_SHUFFLE(3, 1, 2, 0));
            _mm256_storeu_si256((__m256i *)&result[i], yF_yE_yD_yC_yB_yA_y9_y8_y7_y6_y5_y4_y3_y2_y1_y0);
        }
    }
    // calculate remaining pixels (scalar tail, single-threaded).
    int start = ((int)size / pixel_per_iteration) * pixel_per_iteration;
    for (int i = start; i < size; i++) {
        // NOTE(review): this truncates instead of rounding, so tail pixels may
        // differ by 1 from the SIMD path for the same input -- confirm whether
        // exact SIMD/scalar agreement is required.
        result[i] = 0.2126 * img[(i * channels)]      // red
                    + 0.7152 * img[(i * channels) + 1] // green
                    + 0.0722 * img[(i * channels) + 2]; // blue
    }
}
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % ImageMagick Image Resize Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2008 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/draw.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/monitor.h" #include "magick/pixel.h" #include "magick/option.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/string_.h" #include "magick/utility.h" #include "magick/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/

/*
  Private filter state: the selected filter/window function pair plus the
  pre-computed support limits, blur factor, and cubic-BC coefficients.
*/
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support,    /* filter region of support - the filter support limit */
    wsupport,   /* window support, usually equal to support (expert only) */
    wsize,      /* dimension to scale to fit window support (usually 1.0) */
    blur,       /* x-scale (blur-sharpen) */
    cubic[8];   /* cubic coefficients for smooth Cubic filters */

  unsigned long
    signature;
};

/*
  Forward declarations.
*/
static MagickRealType
  I0(MagickRealType x),
  BesselOrderOne(MagickRealType);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   F i l t e r   F u n c t i o n s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%  They are all internal to this module only.  See AcquireResizeFilterInfo()
%  for details of the access to these functions, via the
%  GetResizeFilterSupport() and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealType *FilterName(const MagickRealType x,
%        const MagickRealType support)
%
%    o x: the distance from the sampling point
%         generally in the range of 0 to support
%         The GetResizeFilterWeight() ensures this is a positive value.
%
%    o resize_filter: Current Filter Information
%        This allows function to access support, and possibly other
%        pre-calculated information defining the functions.
%
*/

static MagickRealType Bessel(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    See Pratt "Digital Image Processing" p.97 for Bessel functions.
    This function is actually an X-scaled Jinc(x) function.
      http://mathworld.wolfram.com/JincFunction.html
    And on page 11 of...
      http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
  */
  if (x == 0.0)
    return((MagickRealType) (MagickPI/4.0));
  return(BesselOrderOne(MagickPI*x)/(2.0*x));
}

static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function.
  */
  return(0.42+0.5*cos(MagickPI*(double) x)+0.08*cos(2.0*MagickPI*(double) x));
}

static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function.
  */
  return((1-x)*cos(MagickPI*(double) x)+sin(MagickPI*(double) x)/MagickPI);
}

static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Just return 1.0; the filter will still be clipped by its support window.
  */
  return(1.0);
}

#if 0
/* Replaced by CubicBC below */
static MagickRealType Catrom(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Catmull-Rom or Keys Cubic Filter, B=0  C=1/2
  */
  /* This is the Cubic Spline Interpolator */
  if (x < 1.0)
    return(1.0+x*x*(-2.5+1.5*x));
  if (x < 2.0)
    return(2.0+x*(-4.0+x*(2.5-0.5*x)));
  return(0.0);
}

/* Replaced by CubicBC below */
static MagickRealType Cubic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    B-Spline, B=1  C=0.  A Cubic Spline approximation of Gaussian.
  */
  if (x < 1.0)
    return((4.0+x*x*(-6.0+3.0*x))/6.0);
  if (x < 2.0)
    return((2.0-x)*(2.0-x)*(2.0-x)/6.0);
  return(0.0);
}

/* Replaced by CubicBC below */
static MagickRealType Hermite(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A Cubic windowing function, B=0, C=0.
  */
  if (x < 1.0)
    return((2.0*x-3.0)*x*x+1.0);
  return(0.0);
}
#endif

static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:

       Mitchell-Netravali  B=1/3 C=1/3   Qualitatively ideal Cubic Filter
       Catmull-Rom         B= 0  C=1/2   Cubic Interpolation Function
       Cubic B-Spline      B= 1  C= 0    Spline Approximation of Gaussian
       Hermite             B= 0  C= 0    Quadratic Spline (support = 1)

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
       http://www.cs.utexas.edu/users/fussell/courses/cs384g/
               lectures/mitchell/Mitchell.pdf

    Coefficients are determined from B,C values
       P0 = (  6 - 2*B       )/6
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6
       P3 = ( 12 - 9*B - 6*C )/6
       Q0 = (      8*B +24*C )/6
       Q1 = (    -12*B -48*C )/6
       Q2 = (      6*B +30*C )/6
       Q3 = (    - 1*B - 6*C )/6

    which are used to define the filter...
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x <= 2

    which ensures function is continuous in value and derivative (slope).

    The coefficients are pre-computed into resize_filter->cubic[0..7] by
    AcquireResizeFilter() (P0..P3 in [0..3], Q0..Q3 in [4..7]).
  */
  if (x < 1.0)
    return(resize_filter->cubic[0]+x*(resize_filter->cubic[1]+x*
      (resize_filter->cubic[2]+x*resize_filter->cubic[3])));
  if (x < 2.0)
    return(resize_filter->cubic[4]+x*(resize_filter->cubic[5]+x*
      (resize_filter->cubic[6]+x*resize_filter->cubic[7])));
  return(0.0);
}

static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  return(exp((double) (-2.0*x*x))*sqrt(2.0/MagickPI));
}

static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A cosine windowing function.
  */
  return(0.5+0.5*cos(MagickPI*(double) x));
}

static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    An offset cosine windowing function.
  */
  return(0.54+0.46*cos(MagickPI*(double) x));
}

static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Kaiser Windowing Function (Bessel windowing):  Alpha is a free value
    from 5 to 8 (currently hardcoded to 6.5).
    Future: make alpha and the I0A pre-calculation an 'expert' setting.
    NOTE(review): Alpha and I0A are never #undef'd after this function --
    presumably no later code reuses those names; verify if extending the file.
  */
#define Alpha  6.5
#define I0A  (1.0/I0(Alpha))
  return(I0A*I0(Alpha*sqrt((double) (1.0-x*x))));
}

static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Lagrange piece-wise polynomial fit of sinc.  N is the 'order' of the
    Lagrange function and depends on the overall support window size of the
    filter;  that is, for a support of 2 it gives a lagrange-4 or piece-wise
    cubic function.

    Note that n is the specific piece of the piece-wise function to
    calculate.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  int
    N,n,i;

  MagickRealType
    value;

  if ( x > resize_filter->support )
    return(0.0);
  N = (int)(resize_filter->wsupport*2);  /* order or number of pieces */
  n = (int)(x+((double)N)/2.0);          /* which piece does x belong to */
  value = 1.0f;
  for ( i=0; i<N; i++)
    if ( i != n )
      value *= (n-i-x)/(n-i);
  return value;
}

#if 0
/* Replaced by CubicBC below */
static MagickRealType Lanczos(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    The Lanczos (sinc windowed sinc) filter.  But it is also used as a
    Bessel-windowed Bessel, which complicates the use of the filter.
  */
  if (x < support)
    return(Sinc(x)*Sinc(x/support));
  return(0.0);
}

static MagickRealType Mitchell(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Mitchell-Netravali  B=1/3 C=1/3  Qualitatively ideal Cubic Filter
  */
#define B   (1.0/3.0)
#define C   (1.0/3.0)
#define P0  ((  6.0- 2.0*B       )/6.0)
#define P2  ((-18.0+12.0*B+ 6.0*C)/6.0)
#define P3  (( 12.0- 9.0*B- 6.0*C)/6.0)
#define Q0  ((       8.0*B+24.0*C)/6.0)
#define Q1  ((     -12.0*B-48.0*C)/6.0)
#define Q2  ((       6.0*B+30.0*C)/6.0)
#define Q3  ((     - 1.0*B- 6.0*C)/6.0)

  if (x < 1.0)
    return(P0+x*x*(P2+x*P3));
  if (x < 2.0)
    return(Q0+x*(Q1+x*(Q2+x*Q3)));
  return(0.0);
}
#endif

static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    This function is actually an X-scaled Sinc(x) function.
  */
  if (x == 0.0)
    return(1.0);
  return(sin(MagickPI*(double) x)/(MagickPI*(double) x));
}

static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
  */
  if (x < 1.0)
    return(1 - x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   A c q u i r e   R e s i z e   F i l t e r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from these filters:
%
%  FIR (Finite impulse Response) Filters
%      Box         Triangle   Quadratic
%      Cubic       Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite impulse Response) Filters
%      Gaussian    Sinc       Bessel
%
%  Windowed Sinc/Bessel Method
%      Blackman    Hanning    Hamming
%      Kaiser      Lanczos (Sinc)
%
%  FIR filters are used as is, and are limited by that filter's support
%  window (unless over-ridden).  'Gaussian', while classed as an IIR filter,
%  is also simply clipped by its support size (1.5).
%
%  Requesting a windowed filter will return either a windowed Sinc, for a
%  one-dimensional orthogonal filtering method, such as ResizeImage(), or a
%  windowed Bessel for image operations requiring a two-dimensional
%  cylindrical filtering method, such as DistortImage().  Which function is
%  used is set by the "cylindrical" boolean argument.
%
%  Directly requesting 'Sinc' or 'Bessel' will force the use of that filter
%  function, with a default 'Blackman' windowing method.  This is not,
%  however, recommended as it removes the correct filter selection for
%  different filtering image operations.  Selecting a window filtering
%  method is better.
%
%  Lanczos is purely a special case of a Sinc windowed Sinc, but defaulting
%  to a 3 lobe support, rather than the default 4 lobe support.
%
%  Special options can be used to override specific, or all, of the filter
%  settings.  However doing so is not advisable unless you have expert
%  knowledge of the use of resampling filter techniques.  Extreme caution is
%  advised.
%
%  "filter:filter"  Select this function as the filter.
%      If a "filter:window" operation is not provided, then no windowing
%      will be performed on the selected filter (support clipped).
%
%      This can be used to force the use of a windowing method as filter,
%      request a 'Sinc' filter in a radially filtered operation, or the
%      'Bessel' filter for an orthogonal filtered operation.
%
%  "filter:window"  Select this windowing function for the filter.
%      While any filter could be used as a windowing function, using that
%      filter's first lobe over the whole support window, using a
%      non-windowing method is not advisable.
%
%  "filter:lobes"  Number of lobes to use for the Sinc/Bessel filter.
%      This is a simpler method of setting the filter support size, one that
%      will correctly handle the Sinc/Bessel switch for an operator's
%      filtering requirements.
%
%  "filter:support"  Set the support size for filtering to the size given.
%      This is not recommended for Sinc/Bessel windowed filters, but is
%      used for simple filters like FIR filters, and the Gaussian filter.
%      This will override any 'filter:lobes' option.
%
%  "filter:blur"  Scale the filter and support window by this amount.
%      A value > 1 will generally result in a more blurred image with more
%      ringing effects, while a value < 1 will sharpen the resulting image
%      with more aliasing and Moire effects.
%
%  "filter:win-support"  Scale windowing function to this size instead.
%      This causes the windowing (or self-windowing Lagrange filter) to act
%      as if the support window is much larger than what is actually
%      supplied to the calling operator.  The filter, however, is still
%      clipped to the real support size given.  If unset this will equal the
%      normal filter support size.
%
%  "filter:b"
%  "filter:c"  Override the preset B,C values for a Cubic type of filter.
%      If only one of these is given it is assumed to be a 'Keys' type of
%      filter such that B+2C=1, where Keys 'alpha' value = C.
%
%  "filter:verbose"  Output verbose plotting data for graphing the
%      resulting filter over the whole support range (with blur effect).
%
%  Set a true un-windowed Sinc filter with many lobes (very slow), e.g.:
%     -set option:filter:filter  Sinc
%     -set option:filter:lobes   8
%
%  For example force an 8 lobe Lanczos (Sinc or Bessel) filter...
%     -filter Lanczos
%     -set option:filter:lobes 8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterTypes filter_type, const MagickBooleanType radial,
%        ExceptionInfo *exception)
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and support.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.
%
%    o radial: 1D orthogonal filter (Sinc) or 2D radial filter (Bessel).
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterTypes filter,const MagickRealType blur,
  const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterTypes
    filter_type,
    window_type;

  long
    filter_artifact;

  MagickRealType
    B,
    C;

  register ResizeFilter
    *resize_filter;

  /*
    Table mapping a given filter to its weighting and windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    A 'Sinc' filter function (must be windowed) could be upgraded to a
    'Bessel' filter if a "cylindrical" filter is requested, unless a
    "Sinc" filter was specifically requested.
  */
  static struct
  {
    FilterTypes
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter, BoxFilter },      /* undefined */
    { PointFilter,     BoxFilter },      /* special, nearest-neighbour filter */
    { BoxFilter,       BoxFilter },      /* Box averaging filter */
    { TriangleFilter,  BoxFilter },      /* Linear interpolation filter */
    { HermiteFilter,   BoxFilter },      /* Hermite interpolation filter */
    { SincFilter,      HanningFilter },  /* Hanning -- Cosine-Sinc */
    { SincFilter,      HammingFilter },  /* Hamming --  '' variation */
    { SincFilter,      BlackmanFilter }, /* Blackman -- 2*Cosine-Sinc */
    { GaussianFilter,  BoxFilter },      /* Gaussian blurring filter */
    { QuadraticFilter, BoxFilter },      /* Quadratic Gaussian approximation */
    { CubicFilter,     BoxFilter },      /* Cubic Gaussian approximation */
    { CatromFilter,    BoxFilter },      /* Cubic interpolator */
    { MitchellFilter,  BoxFilter },      /* 'ideal' cubic filter */
    { LanczosFilter,   SincFilter },     /* special, 3 lobed Sinc-Sinc */
    { BesselFilter,    BlackmanFilter }, /* 3 lobed bessel - specific request */
    { SincFilter,      BlackmanFilter }, /* 4 lobed sinc - specific request */
    { SincFilter,      KaiserFilter },   /* Kaiser -- SqRoot-Sinc */
    { SincFilter,      WelshFilter },    /* Welsh -- Parabolic-Sinc */
    { SincFilter,      CubicFilter },    /* Parzen -- Cubic-Sinc */
    { LagrangeFilter,  BoxFilter },      /* Lagrange self-windowing filter */
    { SincFilter,      BohmanFilter },   /* Bohman -- 2*Cosine-Sinc */
    { SincFilter,      TriangleFilter }  /* Bartlett -- Triangle-Sinc */
  };

  /*
    Table mapping the filter/window function from the above table to the
    actual filter/window function call to use, the default support size
    for that filter as a weighting function, and the point to scale when
    that function is used as a windowing function (typically 1.0).
  */
  static struct
  {
    MagickRealType
      (*function)(const MagickRealType, const ResizeFilter*),
      support,  /* default support size for function as a filter */
      wsize,    /* size windowing function, for scaling windowing function */
      B,C;      /* Cubic filter factors for a CubicBC function, else ignored */
  } const filters[SentinelFilter] =
  {
    { Box,       0.0f,    0.5f,    0.0f, 0.0f }, /* Undefined */
    { Box,       0.0f,    0.5f,    0.0f, 0.0f }, /* Point */
    { Box,       0.5f,    0.5f,    0.0f, 0.0f }, /* Box */
    { Triangle,  1.0f,    1.0f,    0.0f, 0.0f }, /* Triangle */
    { CubicBC,   1.0f,    1.0f,    0.0f, 0.0f }, /* Hermite, Cubic B=C=0 */
    { Hanning,   1.0f,    1.0f,    0.0f, 0.0f }, /* Hanning, Cosine window */
    { Hamming,   1.0f,    1.0f,    0.0f, 0.0f }, /* Hamming, '' variation */
    { Blackman,  1.0f,    1.0f,    0.0f, 0.0f }, /* Blackman, 2*cos window */
    { Gaussian,  1.5f,    1.5f,    0.0f, 0.0f }, /* Gaussian */
    { Quadratic, 1.5f,    1.5f,    0.0f, 0.0f }, /* Quadratic Gaussian */
    { CubicBC,   2.0f,    2.0f,    1.0f, 0.0f }, /* B-Spline of Gaussian B=1 C=0 */
    { CubicBC,   2.0f,    1.0f,    0.0f, 0.5f }, /* Catmull-Rom B=0 C=1/2 */
    { CubicBC,   2.0f,    1.0f,    1.0f/3.0f, 1.0f/3.0f }, /* Mitchell B=C=1/3 */
    { Sinc,      3.0f,    1.0f,    0.0f, 0.0f }, /* Lanczos, 3 lobed Sinc-Sinc */
    { Bessel,    3.2383f, 1.2197f, .0f,  .0f  }, /* 3 lobed Blackman-Bessel */
    { Sinc,      4.0f,    1.0f,    0.0f, 0.0f }, /* 4 lobed Blackman-Sinc */
    { Kaiser,    1.0f,    1.0f,    0.0f, 0.0f }, /* Kaiser, sq-root windowing */
    { Welsh,     1.0f,    1.0f,    0.0f, 0.0f }, /* Welsh, Parabolic windowing */
    { CubicBC,   2.0f,    2.0f,    1.0f, 0.0f }, /* Parzen, B-Spline windowing */
    { Lagrange,  2.0f,    1.0f,    0.0f, 0.0f }, /* Lagrangian filter */
    { Bohman,    1.0f,    1.0f,    0.0f, 0.0f }, /* Bohman, 2*Cosine windowing */
    { Triangle,  1.0f,    1.0f,    0.0f, 0.0f }  /* Bartlett, Triangle windowing */
  };

  /*
    The known zero crossings of the Bessel() or the Jinc(x*PI) function.
    Found by using
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    for the Jv-function with v=1, then dividing the X-roots by PI
    (tabled below).
  */
  static MagickRealType
    bessel_zeros[16] =
    {
      1.21966989126651f,
      2.23313059438153f,
      3.23831548416624f,
      4.24106286379607f,
      5.24276437687019f,
      6.24392168986449f,
      7.24475986871996f,
      8.24539491395205f,
      9.24589268494948f,
      10.2462933487549f,
      11.2466227948779f,
      12.2468984611381f,
      13.2471325221811f,
      14.2473337358069f,
      15.2475085630373f,
      16.247661874701f
    };

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
  if (resize_filter == (ResizeFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Defaults for the requested filter.
  */
  filter_type = mapping[filter].filter;
  window_type = mapping[filter].window;
  if ( cylindrical == MagickTrue && filter != SincFilter )
    {
      /* Promote 1D Sinc filter to a 2D Bessel filter. */
      if ( filter_type == SincFilter )
        filter_type = BesselFilter;
      /* Promote Lanczos (Sinc-Sinc) to Lanczos (Bessel-Bessel). */
      else if ( filter_type == LanczosFilter )
        {
          filter_type = BesselFilter;
          window_type = BesselFilter;
        }
      /* FUTURE: blur other filters by 1.22 for cylindrical usage??? */
    }
  /*
    Override filter selection from the "filter:filter" image artifact.
  */
  artifact=GetImageArtifact(image,"filter:filter");
  if (artifact != (const char *) NULL)
    {
      /* raw filter request - no window function */
      filter_artifact=ParseMagickOption(MagickFilterOptions,
        MagickFalse,artifact);
      if ( UndefinedFilter < filter_artifact &&
              filter_artifact < SentinelFilter )
        {
          filter_type = (FilterTypes) filter_artifact;
          window_type = BoxFilter;
        }
      /* Lanczos is not a real filter, but a self windowing Sinc/Bessel. */
      if ( filter_artifact == LanczosFilter )
        {
          filter_type = (cylindrical==MagickTrue) ?
                           BesselFilter : LanczosFilter;
          window_type = (cylindrical==MagickTrue) ?
                           BesselFilter : SincFilter;
        }
      /* Filter override with a specific window function? */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          filter_artifact=ParseMagickOption(MagickFilterOptions,
            MagickFalse,artifact);
          if ( UndefinedFilter < filter_artifact &&
                  filter_artifact < SentinelFilter )
            {
              if ( filter_artifact != LanczosFilter )
                window_type = (FilterTypes) filter_artifact;
              else
                window_type = (cylindrical==MagickTrue) ?
                                 BesselFilter : SincFilter;
            }
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Bessel. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          filter_artifact=ParseMagickOption(MagickFilterOptions,MagickFalse,
            artifact);
          if ( UndefinedFilter < filter_artifact &&
                  filter_artifact < SentinelFilter )
            {
              filter_type = (cylindrical==MagickTrue) ?
                               BesselFilter : SincFilter;
              if ( filter_artifact != LanczosFilter )
                window_type = (FilterTypes) filter_artifact;
              else
                window_type = filter_type;
            }
        }
    }
  /*
    Fill in the resize filter from the selected filter/window functions.
  */
  resize_filter->filter   = filters[filter_type].function;
  resize_filter->support  = filters[filter_type].support;
  resize_filter->window   = filters[window_type].function;
  resize_filter->wsize    = filters[window_type].wsize;
  resize_filter->blur     = blur;
  resize_filter->signature=MagickSignature;
  /*
    Filter support overrides.
  */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      long
        lobes = atol(artifact);

      if ( lobes < 1 ) lobes = 1;
      resize_filter->support = (MagickRealType) lobes;
      if ( filter_type == BesselFilter )
        {
          /* Bessel lobes end at Jinc zero crossings, not at integers. */
          if ( lobes > 16 ) lobes = 16;
          resize_filter->support = bessel_zeros[lobes-1];
        }
    }
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support = fabs(atof(artifact));
  /*
    Scale windowing function separately to the support 'clipping' window
    that the calling operator is planning to actually use.
    - Expert Use Only
  */
  resize_filter->wsupport = resize_filter->support;
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->wsupport = fabs(atof(artifact));
  /*
    Filter blur -- scaling both filter and support window.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur = atof(artifact);
  if ( resize_filter->blur < MagickEpsilon )
    resize_filter->blur = (MagickRealType) MagickEpsilon;
  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ( filters[filter_type].function == CubicBC
       || filters[window_type].function == CubicBC )
    {
      if ( filters[filter_type].function == CubicBC )
        {
          B=filters[filter_type].B;
          C=filters[filter_type].C;
        }
      else if ( filters[window_type].function == CubicBC )
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=atof(artifact);
          C=1.0-2.0*B;  /* Calculate C as if it is a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            C=atof(artifact);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=atof(artifact);
              B=(1.0-C)/2.0;  /* Calculate B as if it is a Keys cubic filter. */
            }
        }
      /*
        Convert B,C values into Cubic coefficients - see CubicBC().
      */
      resize_filter->cubic[0]=(  6.0 -2.0*B       )/6.0;
      resize_filter->cubic[1]=0.0;
      resize_filter->cubic[2]=(-18.0+12.0*B+ 6.0*C)/6.0;
      resize_filter->cubic[3]=( 12.0- 9.0*B- 6.0*C)/6.0;
      resize_filter->cubic[4]=(       8.0*B+24.0*C)/6.0;
      resize_filter->cubic[5]=(     -12.0*B-48.0*C)/6.0;
      resize_filter->cubic[6]=(       6.0*B+30.0*C)/6.0;
      resize_filter->cubic[7]=(     - 1.0*B- 6.0*C)/6.0;
    }
  /*
    Output filter graph -- for graphing the filter result.
  */
  artifact=GetImageArtifact(image,"filter:verbose");
  if (artifact != (const char *) NULL)
    {
      MagickRealType
        x;

      MagickRealType
        s = GetResizeFilterSupport(resize_filter);

      printf("# support = %lg\n",(double) s);
      for ( x = 0.0; x <= s; x+=0.01f )
        printf("%5.2lf\t%lf\n", (double) x,
          (double) GetResizeFilterWeight(resize_filter, x) );
      printf("%5.2lf\t%lf\n", (double) s, 0.0 );
    }
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,
%        const unsigned long columns,const unsigned long rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
#define AdaptiveResizeImageTag  "Resize/Image"

  Image
    *resize_image;

  long
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  PointInfo
    offset;

  register IndexPacket
    *resize_indexes;

  register long
    x;

  register PixelPacket
    *q;

  ResampleFilter
    *resample_filter;

  ViewInfo
    *resize_view;

  /*
    Adaptively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(image,&pixel);
  resample_filter=AcquireResampleFilter(image,exception);
  if (image->interpolate == UndefinedInterpolatePixel)
    (void) SetResampleFilterInterpolateMethod(resample_filter,
      MeshInterpolatePixel);
  resize_view=OpenCacheView(resize_image);
  for (y=0; y < (long) resize_image->rows; y++)
  {
    q=SetCacheView(resize_view,0,y,resize_image->columns,1);
    if (q == (PixelPacket *) NULL)
      break;
    resize_indexes=GetIndexes(resize_image);
    offset.y=((MagickRealType) y*image->rows/resize_image->rows);
    for (x=0; x < (long) resize_image->columns; x++)
    {
      /* resample at the pixel center of the source position */
      offset.x=((MagickRealType) x*image->columns/resize_image->columns);
      pixel=ResamplePixelColor(resample_filter,offset.x-0.5,offset.y-0.5);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheView(resize_view) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(y,image->rows) != MagickFalse))
      {
        status=image->progress_monitor(AdaptiveResizeImageTag,y,image->rows,
          image->client_data);
        if (status == MagickFalse)
          break;
      }
  }
  resample_filter=DestroyResampleFilter(resample_filter);
  resize_view=CloseCacheView(resize_view);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1 (the identities below are those of J1):
%
%    Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%       j1(x) = x*j1(x);
%
%    For x in (8,inf)
%
%       j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%    where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%       cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%               =  1/sqrt(2) * (sin(x) - cos(x))
%       sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%               = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      MagickRealType BesselOrderOne(MagickRealType x)
%
%  A description of each parameter follows:
%
%    o x: MagickRealType value.
%
*/

#undef I0
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    sum,
    t,
    y;

  register long
    i;

  /*
    Zeroth order Bessel function of the first kind, summed term by term
    (t = (x^2/4)^k/(k!)^2) until the terms drop below MagickEpsilon.
    Used by the Kaiser() windowing function.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((MagickRealType) i*i);
  }
  return(sum);
}

#undef J1
static MagickRealType J1(MagickRealType x)
{
  /*
    Rational polynomial approximation of J1(x)/x for small |x|;
    evaluated with Horner's rule in x^2.
  */
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static MagickRealType P1(MagickRealType x)
{
  /*
    Asymptotic-expansion helper for BesselOrderOne(); rational
    approximation in (8/x)^2, valid for x in (8,inf).
  */
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  /*
    Asymptotic-expansion helper for BesselOrderOne(); rational
    approximation in (8/x)^2, valid for x in (8,inf).
  */
  MagickRealType
    p,
    q;

  register long
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;  /* remember the sign of x; J1 is odd: J1(-x) = -J1(x) */
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  /* asymptotic expansion for large x, using x1 = x-3*pi/4 (see above) */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->signature=(~MagickSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   S u p p o r t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* the usable window is the raw support scaled by the blur factor */
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   W e i g h t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filter's current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const MagickRealType x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    x_blur,
    wscale;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  x_blur = fabs(x)/resize_filter->blur;  /* X position with blur scaling */
  /* windowing function - scale the weighting filter by this amount */
  if ( resize_filter->wsupport < MagickEpsilon
         || resize_filter->window == Box )
    wscale = 1.0;  /* Point/Box filter -- avoid division by zero */
  else
    {
      wscale = resize_filter->wsize/resize_filter->wsupport; /* scale window */
      wscale = resize_filter->window(x_blur*wscale,resize_filter);
    }
  /* weighting for the filter at this position */
  return( wscale*resize_filter->filter(x_blur,resize_filter) );
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() is a convenience method that scales an image proportionally
%  to twice its size.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *magnify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  magnify_image=ResizeImage(image,2*image->columns,2*image->rows,CubicFilter,
    1.0,exception);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally
%  to half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,CubicFilter,
    1.0,exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  unsigned long
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* a density of 72 DPI is assumed when the image has no resolution set */
  width=(unsigned long) (x_resolution*image->columns/
    (image->x_resolution == 0.0 ? 72.0 : image->x_resolution)+0.5);
  height=(unsigned long) (y_resolution*image->rows/
    (image->y_resolution == 0.0 ? 72.0 : image->y_resolution)+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,
%        const unsigned long columns,const unsigned long rows,
%        const double delta_x,const double rigidity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *LiquidRescaleImage(const Image *image, const unsigned long columns,const unsigned long rows, const double delta_x,const double rigidity,ExceptionInfo *exception) { #define LiquidRescaleImageTag "Rescale/Image" const char *map; guchar *packet; Image *rescale_image; int x, y; LqrCarver *carver; LqrRetVal lqr_status; MagickBooleanType status; MagickPixelPacket pixel; register IndexPacket *rescale_indexes; register PixelPacket *q; unsigned char *pixels; /* Liquid rescale image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); if ((columns <= 2) || (rows <= 2)) return(ZoomImage(image,columns,rows,exception)); if ((columns >= (2*image->columns)) || (rows >= (2*image->rows))) { Image *resize_image; unsigned long height, width; /* Honor liquid resize size limitations. 
*/ for (width=image->columns; columns >= (2*width-1); width*=2); for (height=image->rows; rows >= (2*height-1); height*=2); resize_image=ResizeImage(image,width,height,image->filter,image->blur, exception); if (resize_image == (Image *) NULL) return((Image *) NULL); rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x, rigidity,exception); resize_image=DestroyImage(resize_image); return(rescale_image); } map="RGB"; if (image->matte == MagickFalse) map="RGBA"; if (image->colorspace == CMYKColorspace) { map="CMYK"; if (image->matte == MagickFalse) map="CMYKA"; } pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows* strlen(map)*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) return((Image *) NULL); status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel, pixels,exception); if (status == MagickFalse) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map)); if (carver == (LqrCarver *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity); lqr_status=lqr_carver_resize(carver,columns,rows); rescale_image=CloneImage(image,lqr_carver_get_width(carver), lqr_carver_get_height(carver),MagickTrue,exception); if (rescale_image == (Image *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); return((Image *) NULL); } if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse) { InheritException(exception,&rescale_image->exception); rescale_image=DestroyImage(rescale_image); return((Image *) NULL); } GetMagickPixelPacket(rescale_image,&pixel); (void) lqr_carver_scan_reset(carver); while (lqr_carver_scan(carver,&x,&y,&packet) != 0) { q=SetImagePixels(rescale_image,x,y,1,1); if (q == (PixelPacket *) NULL) break; 
rescale_indexes=GetIndexes(rescale_image); pixel.red=QuantumRange*(packet[0]/255.0); pixel.green=QuantumRange*(packet[1]/255.0); pixel.blue=QuantumRange*(packet[2]/255.0); if (image->colorspace != CMYKColorspace) { if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[3]/255.0); } else { pixel.index=QuantumRange*(packet[3]/255.0); if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[4]/255.0); } SetPixelPacket(rescale_image,&pixel,q,rescale_indexes); if (SyncImagePixels(rescale_image) == MagickFalse) break; } /* Relinquish resources. */ lqr_carver_destroy(carver); return(rescale_image); } #else MagickExport Image *LiquidRescaleImage(const Image *image, const unsigned long magick_unused(columns), const unsigned long magick_unused(rows), const double magick_unused(delta_x), const double magick_unused(rigidity), ExceptionInfo *exception) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError, "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename); return((Image *) NULL); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResizeImage() scales an image to the desired dimensions, using % the given filter (see AcquireFilterInfo() ). % % If an undefined filter is given the filter defaults to Mitchell for a % colormapped image, a image with a matte channel, or if the image is % enlarged. Otherwise the filter defaults to a Lanczos. % % ResizeImage() was inspired by Paul Heckbert's "zoom" program. 
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const unsigned long columns,
%        const unsigned long rows,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%            Typically set this to 1.0.
%
%    o exception: Return any errors or warnings in this structure.
%
*/

/*
  One filter tap: a source pixel coordinate and its filter weight.
*/
typedef struct _ContributionInfo
{
  MagickRealType
    weight;

  long
    pixel;
} ContributionInfo;

/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  HorizontalFilter(): resample image horizontally into resize_image (whose
  column count defines the target width) by accumulating filter-weighted
  contributions per destination column.  *quantum is a running progress
  counter shared with the companion vertical pass; span is the combined
  work total used for progress reporting.  Returns MagickTrue only if every
  destination column was processed.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  ContributionInfo
    *contribution;

  long
    j,
    n,
    start,
    stop,
    x;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel,
    zero;

  MagickRealType
    alpha,
    center,
    density,
    gamma,
    scale,
    support;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *pixels;

  register IndexPacket
    *resize_indexes;

  register long
    i,
    y;

  register PixelPacket
    *resize_pixels;

  /*
    Apply filter to resize horizontally from image to resize_image.
    When shrinking (x_factor < 1) the filter support is widened by the
    scale factor so it covers the corresponding source interval.
  */
  scale=MagickMax(1.0/x_factor,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  resize_image->storage_class=image->storage_class;
  if (support > 0.5)
    {
      if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&resize_image->exception);
          return(MagickFalse);
        }
    }
  else
    {
      /*
        Support too small even for nearest neighbour  Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contribution=(ContributionInfo *) AcquireQuantumMemory((size_t) (2.0*support+
    3.0),sizeof(*contribution));
  if (contribution == (ContributionInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  for (x=0; x < (long) resize_image->columns; x++)
  {
    /*
      Build the contribution list for destination column x: source columns
      [start,stop) with filter weights sampled at pixel centers.
    */
    center=(MagickRealType) (x+0.5)/x_factor;
    start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5);
    stop=(long) (MagickMin(center+support,(double) image->columns)+0.5);
    density=0.0;
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    pixels=AcquireImagePixels(image,contribution[0].pixel,0,(unsigned long)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    resize_pixels=SetImagePixels(resize_image,x,0,1,resize_image->rows);
    if ((pixels == (const PixelPacket *) NULL) ||
        (resize_pixels == (PixelPacket *) NULL))
      break;
    indexes=AcquireIndexes(image);
    resize_indexes=GetIndexes(resize_image);
#pragma omp parallel for private(alpha, gamma, i, j, pixel)
    for (y=0; y < (long) resize_image->rows; y++)
    {
      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /*
            No alpha channel: plain weighted sum of the taps.
          */
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight;
            pixel.red+=alpha*(pixels+j)->red;
            pixel.green+=alpha*(pixels+j)->green;
            pixel.blue+=alpha*(pixels+j)->blue;
            pixel.opacity+=alpha*(pixels+j)->opacity;
          }
          resize_pixels[y].red=RoundToQuantum(pixel.red);
          resize_pixels[y].green=RoundToQuantum(pixel.green);
          resize_pixels[y].blue=RoundToQuantum(pixel.blue);
          resize_pixels[y].opacity=RoundToQuantum(pixel.opacity);
        }
      else
        {
          /*
            Alpha channel present: weight each tap by its coverage
            (QuantumRange-opacity) and renormalize color by the total
            coverage gamma so transparent pixels do not darken the result.
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight*QuantumScale*((MagickRealType)
              QuantumRange-(pixels+j)->opacity);
            pixel.red+=alpha*(pixels+j)->red;
            pixel.green+=alpha*(pixels+j)->green;
            pixel.blue+=alpha*(pixels+j)->blue;
            pixel.opacity+=contribution[i].weight*(pixels+j)->opacity;
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          resize_pixels[y].red=RoundToQuantum(gamma*pixel.red);
          resize_pixels[y].green=RoundToQuantum(gamma*pixel.green);
          resize_pixels[y].blue=RoundToQuantum(gamma*pixel.blue);
          resize_pixels[y].opacity=RoundToQuantum(pixel.opacity);
        }
      if ((image->colorspace == CMYKColorspace) &&
          (resize_image->colorspace == CMYKColorspace))
        {
          /*
            The black (K) channel lives in the index array; filter it the
            same way.
          */
          if (image->matte == MagickFalse)
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight;
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[y]=(IndexPacket) RoundToQuantum(pixel.index);
            }
          else
            {
              gamma=0.0;
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight*QuantumScale*((MagickRealType)
                  QuantumRange-(pixels+j)->opacity);
                pixel.index+=alpha*indexes[j];
                gamma+=alpha;
              }
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ?
                1.0 : gamma);
              resize_indexes[y]=(IndexPacket) RoundToQuantum(gamma*pixel.index);
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /*
            Colormapped output: pick the colormap index of the tap nearest
            the filter center rather than blending indices.
          */
          i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[i-start].pixel-contribution[0].pixel);
          resize_indexes[y]=indexes[j];
        }
    }
    if (SyncImagePixels(resize_image) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(*quantum,span) != MagickFalse))
      {
        status=image->progress_monitor(ResizeImageTag,(MagickOffsetType)
          *quantum,span,image->client_data);
        if (status == MagickFalse)
          break;
      }
    (*quantum)++;
  }
  contribution=(ContributionInfo *) RelinquishMagickMemory(contribution);
  return(x == (long) resize_image->columns ? MagickTrue : MagickFalse);
}

/*
  VerticalFilter(): resample image vertically into resize_image (whose row
  count defines the target height); the row-direction twin of
  HorizontalFilter() above.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception)
{
  ContributionInfo
    *contribution;

  long
    j,
    n,
    start,
    stop,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel,
    zero;

  MagickRealType
    alpha,
    center,
    density,
    gamma,
    scale,
    support;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *pixels;

  register IndexPacket
    *resize_indexes;

  register long
    i,
    x;

  register PixelPacket
    *resize_pixels;

  /*
    Apply filter to resize vertically from image to resize_image.
  */
  scale=MagickMax(1.0/y_factor,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  resize_image->storage_class=image->storage_class;
  if (support > 0.5)
    {
      if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&resize_image->exception);
          return(MagickFalse);
        }
    }
  else
    {
      /*
        Support too small even for nearest neighbour  Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contribution=(ContributionInfo *) AcquireQuantumMemory((size_t) (2.0*support+
    3.0),sizeof(*contribution));
  if (contribution == (ContributionInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  for (y=0; y < (long) resize_image->rows; y++)
  {
    /*
      Build the contribution list for destination row y: source rows
      [start,stop) with filter weights sampled at pixel centers.
    */
    center=(MagickRealType) (y+0.5)/y_factor;
    start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5);
    stop=(long) (MagickMin(center+support,(double) image->rows)+0.5);
    density=0.0;
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    pixels=AcquireImagePixels(image,0,contribution[0].pixel,image->columns,
      (unsigned long) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    resize_pixels=SetImagePixels(resize_image,0,y,resize_image->columns,1);
    if ((pixels == (const PixelPacket *) NULL) ||
        (resize_pixels == (PixelPacket *) NULL))
      break;
    indexes=AcquireIndexes(image);
    resize_indexes=GetIndexes(resize_image);
#pragma omp parallel for private(alpha, gamma, i, j, pixel)
    for (x=0; x < (long) resize_image->columns; x++)
    {
      gamma=0.0;
      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /*
            No alpha channel: plain weighted sum of the taps.
          */
          for (i=0; i < n; i++)
          {
            j=(long) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight;
            pixel.red+=alpha*(pixels+j)->red;
            pixel.green+=alpha*(pixels+j)->green;
            pixel.blue+=alpha*(pixels+j)->blue;
            pixel.opacity+=alpha*(pixels+j)->opacity;
          }
          resize_pixels[x].red=RoundToQuantum(pixel.red);
          resize_pixels[x].green=RoundToQuantum(pixel.green);
          resize_pixels[x].blue=RoundToQuantum(pixel.blue);
          resize_pixels[x].opacity=RoundToQuantum(pixel.opacity);
        }
      else
        {
          /*
            Alpha channel present: weight each tap by its coverage
            (QuantumRange-opacity) and renormalize by total coverage gamma.
          */
          for (i=0; i < n; i++)
          {
            j=(long) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight*QuantumScale*((MagickRealType)
              QuantumRange-(pixels+j)->opacity);
            pixel.red+=alpha*(pixels+j)->red;
            pixel.green+=alpha*(pixels+j)->green;
            pixel.blue+=alpha*(pixels+j)->blue;
            pixel.opacity+=contribution[i].weight*(pixels+j)->opacity;
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          resize_pixels[x].red=RoundToQuantum(gamma*pixel.red);
          resize_pixels[x].green=RoundToQuantum(gamma*pixel.green);
          resize_pixels[x].blue=RoundToQuantum(gamma*pixel.blue);
          resize_pixels[x].opacity=RoundToQuantum(pixel.opacity);
        }
      if ((image->colorspace == CMYKColorspace) &&
          (resize_image->colorspace == CMYKColorspace))
        {
          /*
            Filter the black (K) channel, stored in the index array.
          */
          gamma=0.0;
          if (image->matte == MagickFalse)
            {
              for (i=0; i < n; i++)
              {
                j=(long) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight;
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[x]=(IndexPacket) RoundToQuantum(pixel.index);
            }
          else
            {
              for (i=0; i < n; i++)
              {
                j=(long) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight*QuantumScale*((MagickRealType)
                  QuantumRange-(pixels+j)->opacity);
                pixel.index+=alpha*indexes[j];
                gamma+=alpha;
              }
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ?
                1.0 : gamma);
              resize_indexes[x]=(IndexPacket) RoundToQuantum(gamma*pixel.index);
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /*
            Colormapped output: copy the colormap index of the tap nearest
            the filter center rather than blending indices.
          */
          i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=(long) ((contribution[i-start].pixel-contribution[0].pixel)*
            image->columns+x);
          resize_indexes[x]=indexes[j];
        }
    }
    if (SyncImagePixels(resize_image) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(*quantum,span) != MagickFalse))
      {
        status=image->progress_monitor(ResizeImageTag,(MagickOffsetType)
          *quantum,span,image->client_data);
        if (status == MagickFalse)
          break;
      }
    (*quantum)++;
  }
  contribution=(ContributionInfo *) RelinquishMagickMemory(contribution);
  return(y == (long) resize_image->rows ? MagickTrue : MagickFalse);
}

/*
  ResizeImage(): filtered resize to columns x rows via two separable
  one-dimensional passes (horizontal + vertical) through an intermediate
  image; see the banner comment above for the filter-selection defaults.
*/
MagickExport Image *ResizeImage(const Image *image,const unsigned long columns,
  const unsigned long rows,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
  FilterTypes
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickRealType
    x_factor,
    y_factor;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  MagickOffsetType
    quantum;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Initialize resize image attributes.
  */
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter) && (blur == 1.0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Scaling, Acquire filter, and contribution info
  */
  x_factor=(MagickRealType) resize_image->columns/(MagickRealType)
    image->columns;
  y_factor=(MagickRealType) resize_image->rows/(MagickRealType) image->rows;
  /*
    Default filter selection per the banner comment: Lanczos, unless the
    caller names one; Point for a no-op geometry; Mitchell for colormapped,
    matte, or enlarged images.
  */
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
    exception);
  /*
    Resize image.  Choose the pass order (horizontal-then-vertical or the
    reverse) that minimizes the total work, measured by the combined size
    of the intermediate and final images.
  */
  quantum=0;
  if ((columns*((MagickSizeType) image->rows+rows)) >
      (rows*((MagickSizeType) image->columns+columns)))
    {
      filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
      if (filter_image == (Image *) NULL)
        {
          resize_image=DestroyImage(resize_image);
          resize_filter=DestroyResizeFilter(resize_filter);
          return((Image *) NULL);
        }
      span=(MagickSizeType) (filter_image->columns+resize_image->rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &quantum,exception);
      status|=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&quantum,exception);
    }
  else
    {
      filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
      if (filter_image == (Image *) NULL)
        {
          resize_image=DestroyImage(resize_image);
          resize_filter=DestroyResizeFilter(resize_filter);
          return((Image *) NULL);
        }
      span=(MagickSizeType) (resize_image->columns+filter_image->rows);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &quantum,exception);
      status|=HorizontalFilter(resize_filter,filter_image,resize_image,
        x_factor,span,&quantum,exception);
    }
  /*
    Free allocated memory.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S a m p l e   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SampleImage() scales an image to the desired dimensions with pixel
%  sampling.  Unlike other scaling methods, this method does not introduce
%  any additional color into the scaled image.
%
%  The format of the SampleImage method is:
%
%      Image *SampleImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the sampled image.
%
%    o rows: the number of rows in the sampled image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  Image
    *sample_image;

  long
    j,
    *x_offset,
    y,
    *y_offset;

  MagickBooleanType
    status;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *pixels;

  register IndexPacket
    *sample_indexes;

  register long
    x;

  register PixelPacket
    *sample_pixels;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(long *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  y_offset=(long *) AcquireQuantumMemory((size_t) sample_image->rows,
    sizeof(*y_offset));
  if ((x_offset == (long *) NULL) || (y_offset == (long *) NULL))
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Initialize pixel offsets: each destination coordinate maps to the
    source pixel whose center it falls on (+0.5 for center sampling).
  */
  for (x=0; x < (long) sample_image->columns; x++)
    x_offset[x]=(long) (((MagickRealType) x+0.5)*image->columns/
      sample_image->columns);
  for (y=0; y < (long) sample_image->rows; y++)
    y_offset[y]=(long) (((MagickRealType) y+0.5)*image->rows/
      sample_image->rows);
  /*
    Sample each row.  j caches the last source row read so that
    consecutive destination rows mapping to the same source row reuse the
    scanline.
  */
  j=(-1);
  pixels=AcquireImagePixels(image,0,0,image->columns,1,exception);
  indexes=AcquireIndexes(image);
  for (y=0; y < (long) sample_image->rows; y++)
  {
    sample_pixels=SetImagePixels(sample_image,0,y,sample_image->columns,1);
    if (sample_pixels == (PixelPacket *) NULL)
      break;
    sample_indexes=GetIndexes(sample_image);
    if (j != y_offset[y])
      {
        /*
          Read a scan line.
        */
        j=y_offset[y];
        pixels=AcquireImagePixels(image,0,j,image->columns,1,exception);
        if (pixels == (const PixelPacket *) NULL)
          break;
        indexes=AcquireIndexes(image);
      }
    /*
      Sample each column.
    */
    for (x=0; x < (long) sample_image->columns; x++)
      sample_pixels[x]=pixels[x_offset[x]];
    if ((image->storage_class == PseudoClass) ||
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (long) sample_image->columns; x++)
        sample_indexes[x]=indexes[x_offset[x]];
    if (SyncImagePixels(sample_image) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(y,image->rows) != MagickFalse))
      {
        status=image->progress_monitor(SampleImageTag,y,image->rows,
          image->client_data);
        if (status == MagickFalse)
          break;
      }
  }
  y_offset=(long *) RelinquishMagickMemory(y_offset);
  x_offset=(long *) RelinquishMagickMemory(x_offset);
  return(sample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleImage() changes the size of an image to the given dimensions.
%
%  The format of the ScaleImage method is:
%
%      Image *ScaleImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  Image
    *scale_image;

  long
    number_rows,
    y;

  MagickBooleanType
    next_column,
    next_row,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  PointInfo
    scale,
    span;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register IndexPacket
    *scale_indexes;

  register long
    i,
    x;

  register MagickPixelPacket
    *s,
    *t;

  register PixelPacket
    *q;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row count is unchanged, the Y pass is a
    pass-through and scanline aliases x_vector instead of a new buffer.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
      image->columns,sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  span.y tracks the fraction of the current destination
    row still uncovered; scale.y the fraction of the current source row
    still unconsumed; y_vector accumulates partially covered source rows.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  for (y=0; y < (long) scale_image->rows; y++)
  {
    q=SetImagePixels(scale_image,0,y,scale_image->columns,1);
    if (q == (PixelPacket *) NULL)
      break;
    scale_indexes=GetIndexes(scale_image);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=AcquireImagePixels(image,0,i++,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          break;
        indexes=AcquireIndexes(image);
        for (x=0; x < (long) image->columns; x++)
        {
          x_vector[x].red=(MagickRealType) p->red;
          x_vector[x].green=(MagickRealType) p->green;
          x_vector[x].blue=(MagickRealType) p->blue;
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) p->opacity;
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) indexes[x];
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (long) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=AcquireImagePixels(image,0,i++,image->columns,1,exception);
              if (p == (const PixelPacket *) NULL)
                break;
              indexes=AcquireIndexes(image);
              for (x=0; x < (long) image->columns; x++)
              {
                x_vector[x].red=(MagickRealType) p->red;
                x_vector[x].green=(MagickRealType) p->green;
                x_vector[x].blue=(MagickRealType) p->blue;
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) p->opacity;
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) indexes[x];
                p++;
              }
              number_rows++;
            }
          /*
            Source row fully contained in this destination row:
            accumulate it, weighted by its coverage.
          */
          for (x=0; x < (long) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) &&
            (number_rows < (long) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=AcquireImagePixels(image,0,i++,image->columns,1,exception);
            if (p == (const PixelPacket *) NULL)
              break;
            indexes=AcquireIndexes(image);
            for (x=0; x < (long) image->columns; x++)
            {
              x_vector[x].red=(MagickRealType) p->red;
              x_vector[x].green=(MagickRealType) p->green;
              x_vector[x].blue=(MagickRealType) p->blue;
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) p->opacity;
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) indexes[x];
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the destination row: accumulated whole rows plus the
          remaining span.y fraction of the current source row.
        */
        s=scanline;
        for (x=0; x < (long) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        s=scanline;
        for (x=0; x < (long) scale_image->columns; x++)
        {
          q->red=RoundToQuantum(s->red);
          q->green=RoundToQuantum(s->green);
          q->blue=RoundToQuantum(s->blue);
          if (scale_image->matte != MagickFalse)
            q->opacity=RoundToQuantum(s->opacity);
          if (scale_indexes != (IndexPacket *) NULL)
            scale_indexes[x]=(IndexPacket) RoundToQuantum(s->index);
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction: same pixel-area accumulation as the Y pass,
          applied along the scanline.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (long) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            /*
              Flush the partial final column.
            */
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((long) (t-scale_scanline) < (long) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (long) scale_image->columns; x++)
        {
          q->red=RoundToQuantum(t->red);
          q->green=RoundToQuantum(t->green);
          q->blue=RoundToQuantum(t->blue);
          if (scale_image->matte != MagickFalse)
            q->opacity=RoundToQuantum(t->opacity);
          if (scale_indexes != (IndexPacket *) NULL)
            scale_indexes[x]=(IndexPacket) RoundToQuantum(t->index);
          t++;
          q++;
        }
      }
    if (SyncImagePixels(scale_image) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(y,image->rows) != MagickFalse))
      {
        status=image->progress_monitor(ScaleImageTag,y,image->rows,
          image->client_data);
        if (status == MagickFalse)
          break;
      }
  }
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResizeFilterSupport() specifies which IR filter to use to window
%
%  The format of the SetResizeFilterSupport method is:
%
%      void SetResizeFilterSupport(ResizeFilter *resize_filter,
%        const MagickRealType support)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
%    o support: the filter support radius.
%
*/
MagickExport void SetResizeFilterSupport(ResizeFilter *resize_filter,
  const MagickRealType support)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* override the filter's default support radius */
  resize_filter->support=support;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.  The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,
  const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
  char
    value[MaxTextExtent];

  const char
    *attribute;

  Image
    *sample_image,
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  struct stat
    attributes;

  unsigned long
    version;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Modest shrink factors resize directly; for large reductions pre-sample to
    5x the target size first so the final zoom runs over far fewer pixels.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    {
      thumbnail_image=ZoomImage(image,columns,rows,exception);
      if (thumbnail_image != (Image *) NULL)
        (void) StripImage(thumbnail_image);
      return(thumbnail_image);
    }
  sample_image=SampleImage(image,5*columns,5*rows,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  thumbnail_image=ZoomImage(sample_image,columns,rows,exception);
  sample_image=DestroyImage(sample_image);
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  /*
    Normalize the thumbnail and attach freedesktop.org thumbnail metadata.
  */
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageOpacity(thumbnail_image,OpaqueOpacity);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  (void) StripImage(thumbnail_image);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"///") == (char *) NULL)
    (void) FormatMagickString(value,MaxTextExtent,"file:///%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  if (stat(image->filename,&attributes) == 0)
    {
      /*
        Only set Thumb::MTime when the file is accessible: attributes is
        uninitialized when stat() fails, so it must not be read outside this
        block.
      */
      (void) FormatMagickString(value,MaxTextExtent,"%ld",(long)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatMagickString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  /*
    Propagate the source image's comment, if any, as the description.
  */
  attribute=GetImageProperty(image,"comment");
  if (attribute != (const char *) NULL)
    (void) SetImageProperty(thumbnail_image,"description",attribute);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatMagickString(value,MaxTextExtent,"%lu",
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Z o o m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZoomImage() creates a new image that is a scaled size of an existing one.
%  It allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.  The Point filter gives fast pixel replication,
%  Triangle is equivalent to bi-linear interpolation, and Mitchell gives
%  slower, very high-quality results.  See Graphic Gems III for details on
%  this algorithm.
%
%  The filter member of the Image structure specifies which image filter to
%  use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
%  The format of the ZoomImage method is:
%
%      Image *ZoomImage(const Image *image,const unsigned long columns,
%        const unsigned long rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o zoom_image: Method ZoomImage returns a pointer to the image after
%      scaling.  A null image is returned if there is a memory shortage.
%
%    o image: the image.
%
%    o columns: An integer that specifies the number of columns in the zoom
%      image.
%
%    o rows: An integer that specifies the number of rows in the scaled
%      image.
%
%    o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
  /*
    Thin convenience wrapper: resize with the image's own filter and blur
    settings and hand the result (or NULL on failure) straight back.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
}
skein_fmt_plug.c
/* Skein cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_skein_256; extern struct fmt_main fmt_skein_512; #elif FMT_REGISTERS_H john_register_one(&fmt_skein_256); john_register_one(&fmt_skein_512); #else #include <string.h> #include "arch.h" #include "sph_skein.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> // OMP_SCALE tuned on core i7 quad core HT // 256bt 512bt // 1 - 233k 232k // 64 - 5406k 5377k // 128 - 6730k 6568k // 256 - 7618k 7405k // 512 - 8243k 8000k // 1k - 8610k 8408k ** this level chosen // 2k - 8804k 8610k // 4k - 8688k 8648k #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 64 #else #define OMP_SCALE 1024 #endif // __MIC__ #endif // OMP_SCALE #endif // _OPENMP #include "memdbg.h" // Skein-256 or Skein-512 are the real format labels. 
#define FORMAT_LABEL "Skein" #define FORMAT_NAME "" #define FORMAT_TAG "$skein$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "Skein 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE256 32 #define BINARY_SIZE512 64 #define CMP_SIZE 28 // skein224 #define SALT_SIZE 0 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 static struct fmt_tests skein_256_tests[] = { {"39CCC4554A8B31853B9DE7A1FE638A24CCE6B35A55F2431009E18780335D2621", ""}, {"$skein$39CCC4554A8B31853B9DE7A1FE638A24CCE6B35A55F2431009E18780335D2621", ""}, // john.pot uses lower case {"$skein$39ccc4554a8b31853b9de7a1fe638a24cce6b35a55f2431009e18780335d2621", ""}, {"$skein$0977b339c3c85927071805584d5460d8f20da8389bbe97c59b1cfac291fe9527", "abc"}, {"$skein$8adf4f8a6fabd34384c661e6c0f91efe9750a18ec6c4d02bcaf05b8246a10dcc", "john"}, {"$skein$f2bf66b04f3e3e234d59f8126e52ba60baf2b0bca9aff437d87a6cf3675fe41a", "passweird"}, {NULL} }; static struct fmt_tests skein_512_tests[] = { {"71b7bce6fe6452227b9ced6014249e5bf9a9754c3ad618ccc4e0aae16b316cc8ca698d864307ed3e80b6ef1570812ac5272dc409b5a012df2a579102f340617a", "\xff"}, {"$skein$BC5B4C50925519C290CC634277AE3D6257212395CBA733BBAD37A4AF0FA06AF41FCA7903D06564FEA7A2D3730DBDB80C1F85562DFCC070334EA4D1D9E72CBA7A", ""}, // john.pot uses lower case {"$skein$bc5b4c50925519c290cc634277ae3d6257212395cba733bbad37a4af0fa06af41fca7903d06564fea7a2d3730dbdb80c1f85562dfcc070334ea4d1d9e72cba7a", ""}, {"$skein$8f5dd9ec798152668e35129496b029a960c9a9b88662f7f9482f110b31f9f93893ecfb25c009baad9e46737197d5630379816a886aa05526d3a70df272d96e75", "abc"}, {"$skein$a2f4cbc67def0c97b3b47deeab86e116293db0220ad9953064d40fc4aeabf243d41040efb4e02b4b4883ae19b5c40129926cb5282ad450e1267f126de40224b7", "john"}, {"$skein$88fcd3b1d6b019c74b8853798b1fa7a1b92314748b79d4a8cf822646104b4aefedb7af721ec07bf1302df031411af55bda48d2177a69537f3e0bfb5d2fb1d656", "passweird"}, 
{NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE512 / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self, int len) { char *p; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; if (strlen(p) != len) return 0; while(*p) if(atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static int valid256(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, 64); } static int valid512(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, 128); } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + BINARY_SIZE512*2 + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE512*2 + 1); strlwr(out + TAG_LENGTH); return out; } static void *get_binary_256(char *ciphertext) { static union { unsigned char c[BINARY_SIZE256]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = strrchr(ciphertext, '$') + 1; else p = ciphertext; for (i = 0; i < BINARY_SIZE256; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void *get_binary_512(char *ciphertext) { static union { unsigned char c[BINARY_SIZE512]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = strrchr(ciphertext, '$') + 1; 
else p = ciphertext; for (i = 0; i < BINARY_SIZE512; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_256(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { sph_skein256_context ctx; sph_skein256_init(&ctx); sph_skein256(&ctx, saved_key[index], strlen(saved_key[index])); sph_skein256_close(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int crypt_512(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { sph_skein512_context ctx; sph_skein512_init(&ctx); sph_skein512(&ctx, saved_key[index], strlen(saved_key[index])); sph_skein512_close(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], CMP_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], CMP_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void skein_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] 
= 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_skein_256 = { { "skein-256", "Skein 256", ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE256, BINARY_ALIGN, SALT_SIZE, BINARY_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, skein_256_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid256, split, get_binary_256, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, skein_set_key, get_key, fmt_default_clear_keys, crypt_256, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_skein_512 = { { "skein-512", "Skein 512", ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE512, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, skein_512_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid512, split, get_binary_512, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, skein_set_key, get_key, fmt_default_clear_keys, crypt_512, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
pagerank.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <pthread.h> #include <omp.h> #include "pagerank.h" #define END_ITER (5E-3 * 5E-3) /** * The struct to store the page and its score */ struct page_score { double score[2]; page* page; double difference; double filler[4]; }; /** * Clean up the allocated page_scores * @param page_scores, the struct of page_scores */ void clean_up(void* page_scores) { if (page_scores == NULL) { return; } free(page_scores); } /** * Initialise the values of the struct array of page scores * @param plist, the list of pages * @param npages, the number of pages * @return the array of page score structs */ struct page_score* init_pageranks(list* plist, int npages) { struct page_score* page_scores = malloc(sizeof(struct page_score) * npages); register double initial_value = 1/(double)(npages); node* current = plist->head; for (size_t i = 0; i < npages; i++) { page_scores[i].page = current->page; page_scores[i].score[0] = initial_value; page_scores[i].score[1] = initial_value; page_scores[i].difference = 0; current = current->next; } return page_scores; } double update_score(struct page_score* page_scores, struct page_score* current_page, double dampener, int x) { double diff = 0.0; double total = 0.0; // Get the list of pages that inlink to this page list* inlist = (*current_page).page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { diff += ((*current_page).score[x] - (*current_page).score[!x]) * ((*current_page).score[x] - (*current_page).score[!x]); return diff; } // Get the node to loop node* current = inlist->head; // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].score[!x]) / ((double)current->page->noutlinks); current = current->next; } (*current_page).score[x] += total * dampener; // Update the new score diff += ((*current_page).score[x] - (*current_page).score[!x]) * ((*current_page).score[x] - 
(*current_page).score[!x]); return diff; } /** * PageRank algorithm * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank_unroll(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score* page_scores = init_pageranks(plist, npages); double dampening_value = (1.0 - dampener)/((double)(npages)); int x = 1; double diff = 1; // Used to check the difference of the scores // Loop through until the convergence threshold is reached while (diff > EPSILON) { diff = 0.0; size_t i = 0; for (; i <= npages - 4; i += 4) { page_scores[i].score[x] = dampening_value; diff += update_score(page_scores, &page_scores[i], dampener, x); page_scores[i + 1].score[x] = dampening_value; diff += update_score(page_scores, &page_scores[i + 1], dampener, x); page_scores[i + 2].score[x] = dampening_value; diff += update_score(page_scores, &page_scores[i + 2], dampener, x); page_scores[i + 3].score[x] = dampening_value; diff += update_score(page_scores, &page_scores[i + 3], dampener, x); } for (; i < npages; i++) { page_scores[i].score[x] = dampening_value; diff += update_score(page_scores, &page_scores[i], dampener, x); } x = (x + 1) % 2; // Update the value so we do not have to copy diff = sqrt(diff); // Get the total difference } // Print the results to stdout int i; for (i = 0; i <= npages - 4; i += 4) { printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); printf("%s %.4lf\n", page_scores[i + 1].page->name, page_scores[i + 1].score[!x]); printf("%s %.4lf\n", page_scores[i + 2].page->name, page_scores[i + 2].score[!x]); printf("%s 
%.4lf\n", page_scores[i + 3].page->name, page_scores[i + 3].score[!x]); } // Print the remaining results for (; i < npages; i++) { printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); } clean_up(page_scores); } /** * PageRank algorithm * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank_nopow(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score* page_scores = init_pageranks(plist, npages); register double dampening_value = (1.0 - dampener)/((double)(npages)); register int x = 1; register double diff = 1; // Used to check the difference of the scores // Loop through until the convergence threshold is reached while (diff > EPSILON) { diff = 0.0; for (size_t i = 0; i < npages; i++) { page_scores[i].score[x] = dampening_value; register double total = 0.0; // Get the list of pages that inlink to this page list* inlist = page_scores[i].page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { diff += (page_scores[i].score[x] - page_scores[i].score[!x]) * (page_scores[i].score[x] - page_scores[i].score[!x]); continue; } // Get the node to loop node* current = inlist->head; // TODO can loop from back or loop unrolling // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].score[!x]) / ((double)current->page->noutlinks); current = current->next; } page_scores[i].score[x] += total * dampener; // Update the new score diff += (page_scores[i].score[x] - page_scores[i].score[!x]) * (page_scores[i].score[x] - page_scores[i].score[!x]); } x = 
(x + 1) % 2; // Update the value so we do not have to copy diff = sqrt(diff); // Get the total difference } // Print the results to stdout for (int i = 0; i < npages; i++) { printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); } clean_up(page_scores); } /** * PageRank algorithm using the pow from math.h * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank_pow(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score* page_scores = init_pageranks(plist, npages); register double dampening_value = (1.0 - dampener)/((double)(npages)); register int x = 1; register double diff = 1; // Used to check the difference of the scores // Loop through until the convergence threshold is reached while (diff > EPSILON) { diff = 0.0; for (size_t i = 0; i < npages; i++) { page_scores[i].score[x] = dampening_value; register double total = 0.0; // Get the list of pages that inlink to this page list* inlist = page_scores[i].page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { diff += pow(page_scores[i].score[x] - page_scores[i].score[!x], 2);; continue; } // Get the node to loop node* current = inlist->head; // TODO can loop from back or loop unrolling // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].score[!x]) / ((double)current->page->noutlinks); current = current->next; } page_scores[i].score[x] += total * dampener; // Update the new score diff += pow(page_scores[i].score[x] - page_scores[i].score[!x], 2); } x = (x + 1) % 2; // Update the 
value so we do not have to copy diff = sqrt(diff); // Get the total difference } // Print the results to stdout for (int i = 0; i < npages; i++) { printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); } clean_up(page_scores); } /** * The struct to store the page and its score */ struct page_score_2D { double new_score; double old_score; page* page; }; /** * Initialise the values of the struct array of page scores using just a old and new double value * @param plist, the list of pages * @param npages, the number of pages * @return the array of page score structs */ struct page_score_2D* init_pageranks_2D(list* plist, int npages) { struct page_score_2D* page_scores = malloc(sizeof(struct page_score_2D) * npages); register double initial_value = 1/(double)(npages); node* current = plist->head; for (size_t i = 0; i < npages; i++) { page_scores[i].page = current->page; page_scores[i].old_score = initial_value; page_scores[i].new_score = 0; current = current->next; } return page_scores; } /** * PageRank algorithm using the pow from math.h as well as the inefficient copy of the page_score struct * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank_pow_old(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score_2D* page_scores = init_pageranks_2D(plist, npages); register double dampening_value = (1.0 - dampener)/((double)(npages)); register double diff = 1; // Used to check the difference of the scores // Loop through until the convergence threshold is reached while (diff > EPSILON) { diff = 0.0; for (size_t i = 
0; i < npages; i++) { page_scores[i].new_score = dampening_value; register double total = 0.0; // Get the list of pages that inlink to this page list* inlist = page_scores[i].page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { diff += pow(page_scores[i].old_score - page_scores[i].old_score, 2);; continue; } // Get the node to loop node* current = inlist->head; // TODO can loop from back or loop unrolling // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].old_score) / ((double)current->page->noutlinks); current = current->next; } page_scores[i].new_score += total * dampener; // Update the new score diff += pow(page_scores[i].new_score - page_scores[i].old_score, 2); } for (size_t i = 0; i < npages; i++) { page_scores[i].old_score = page_scores[i].new_score; } diff = sqrt(diff); // Get the total difference } // Print the results to stdout for (int i = 0; i < npages; i++) { printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].new_score); } clean_up(page_scores); } struct page_score_padding { page* page; double score[16]; }; /** * Initialise the values of the struct array of page scores * @param plist, the list of pages * @param npages, the number of pages * @return the array of page score structs */ struct page_score_padding* init_pageranks_padding(list* plist, int npages) { struct page_score_padding* page_scores = malloc(sizeof(struct page_score_padding) * npages); register double initial_value = 1/(double)(npages); node* current = plist->head; for (size_t i = 0; i < npages; i++) { page_scores[i].page = current->page; page_scores[i].score[0] = initial_value; page_scores[i].score[15] = initial_value; current = current->next; } return page_scores; } /** * PageRank algorithm OpenMP * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, 
number of edges * @param dampener, the dampening effect on the pages */ void pagerank_padding(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score_padding* page_scores = init_pageranks_padding(plist, npages); double dampening_value = (1.0 - dampener)/((double)(npages)); omp_set_num_threads(ncores); double diff = 1; // Used to check the difference of the scores int old_index = 15; int new_index = 0; // Loop through until the convergence threshold is reached while (diff > END_ITER) { diff = 0.0; size_t i; #pragma omp parallel for schedule(dynamic, 4) private(i) for (i = 0; i < npages; i++) { page_scores[i].score[new_index] = dampening_value; double total = 0.0; // Get the list of pages that inlink to this page list* inlist = page_scores[i].page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { continue; } // Get the node to loop node* current = inlist->head; // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].score[old_index]) / ((double)current->page->noutlinks); current = current->next; } page_scores[i].score[new_index] += total * dampener; // Update the new score } #pragma omp parallel for reduction (+:diff) for (i = 0; i < npages; i++) { diff += (page_scores[i].score[new_index] - page_scores[i].score[old_index]) * (page_scores[i].score[new_index] - page_scores[i].score[old_index]); } old_index ^= new_index; new_index ^= old_index; old_index ^= new_index; } // Print the results to stdout #pragma omp parallel for ordered for (int i = 0; i < npages; i++) { // printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); } clean_up(page_scores); } // The ikj matrix multiply void multiply(const double* mata, size_t mata_width, size_t mata_height, const double* 
matb, size_t matb_height, double* result_mat, double dampening_value) { if(result_mat) { size_t y, x; #pragma omp parallel shared(mata, matb, result_mat) private(y, x) { #pragma omp for for(y = 0; y < mata_height; y++) { result_mat[y] = dampening_value; for(x = 0; x < matb_height; x++) { (result_mat)[y] += (matb[x] * mata[(y * mata_width) + x]); } } } } } /** * PageRank algorithm OpenMP * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank_mm(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } double* score_vector = malloc(sizeof(double) * npages); double* rank_vector = malloc(sizeof(double) * npages); double* matrix = calloc(sizeof(double), npages * npages); double dampening_value = (1.0 - dampener)/((double)(npages)); double diff = 1; // Used to check the difference of the scores node* current = plist->head; for (int i = 0; i < npages; i++) { if (current->page->inlinks) { node* node = current->page->inlinks->head; while (node) { matrix[i * npages + node->page->index] = dampener/((double)node->page->noutlinks); node = node->next; } } current = current->next; } // Check if columns are 0 then set to 1/N #pragma omp parallel for shared(matrix, score_vector) for (int j = 0; j < npages; j++) { score_vector[j] = 1/((double)npages); double sum = 0; for (int i = 0; i < npages; i++) { if (matrix[(i * npages) + j]) { sum = 1; break; } } if (!sum) { for (int i = 0; i < npages; i++) { matrix[i * npages + j] = dampener/((double)npages); } } } /* * PRINTING VALUES OF MATRIX for (int i = 0; i < npages; i++) { for (int j = 0; j < npages; j++) { printf("%lf ", matrix[i * npages + j]); } printf("\n"); } for (int i = 0; 
i < npages; i++) { printf("%lf\n", score_vector[i]); } */ omp_set_num_threads(ncores); // Loop through until the convergence threshold is reached while (diff > EPSILON) { diff = 0.0; multiply(matrix, npages, npages, score_vector, npages, rank_vector, dampening_value); #pragma omp parallel for reduction (+:diff) for (int i = 0; i < npages; i++) { diff += (rank_vector[i] - score_vector[i]) * (rank_vector[i] - score_vector[i]); } diff = sqrt(diff); /* #pragma omp parallel for for (int i = 0; i < npages; i++) { rank_vector[i] = rank_vector[i] / norm; } */ // Update for next iteration so that we do not have to copy double* temp = rank_vector; rank_vector = score_vector; score_vector = temp; } // Print the results to stdout current = plist->head; for (int i = 0; i < npages; i++) { printf("%s %.4lf\n", current->page->name, score_vector[i]); current = current->next; } free(score_vector); free(rank_vector); free(matrix); } /** * PageRank algorithm OpenMP * Given a list of pages calculate the ranking of the pages using a dampening effect * @param plist, list of pages * @param ncores, number of cores * @param npages, number of pages * @param nedges, number of edges * @param dampener, the dampening effect on the pages */ void pagerank(list* plist, int ncores, int npages, int nedges, double dampener) { // Check for invalid parameters if (plist == NULL || ncores <= 0 || npages <= 0 || nedges < 0 || dampener <= 0) { return; } // Create page scores vector and load register for reused values struct page_score* page_scores = init_pageranks(plist, npages); double dampening_value = (1.0 - dampener)/((double)(npages)); register int x = 1; omp_set_num_threads(ncores); register double diff = 1; // Used to check the difference of the scores // Loop through until the convergence threshold is reached while (diff > 0.000025) { diff = 0.0; size_t i; #pragma omp parallel for private(i) for (i = 0; i < npages; i++) { page_scores[i].score[x] = dampening_value; double total = 0.0; // Get the list 
of pages that inlink to this page list* inlist = page_scores[i].page->inlinks; // If null then add diffrerence and continue looping if (inlist == NULL) { page_scores[i].difference = (page_scores[i].score[x] - page_scores[i].score[!x]) * (page_scores[i].score[x] - page_scores[i].score[!x]); continue; } // Get the node to loop node* current = inlist->head; // Loop through the list while (current != NULL) { total += (page_scores[current->page->index].score[!x]) / ((double)current->page->noutlinks); current = current->next; } page_scores[i].score[x] += total * dampener; // Update the new score page_scores[i].difference = (page_scores[i].score[x] - page_scores[i].score[!x]) * (page_scores[i].score[x] - page_scores[i].score[!x]); } #pragma omp parallel for reduction (+:diff) for (i = 0; i < npages; i++) { diff += page_scores[i].difference; } x = (x + 1) % 2; // Update the value so we do not have to copy } // Print the results to stdout for (int i = 0; i < npages; i++) { // printf("%s %.4lf\n", page_scores[i].page->name, page_scores[i].score[!x]); } clean_up(page_scores); } /* ###################################### ### DO NOT MODIFY BELOW THIS POINT ### ###################################### */ int main(void) { /* ###################################################### ### DO NOT MODIFY THE MAIN FUNCTION OR HEADER FILE ### ###################################################### */ list* plist = NULL; double dampener; int ncores, npages, nedges; /* read the input then populate settings and the list of pages */ read_input(&plist, &ncores, &npages, &nedges, &dampener); double start = omp_get_wtime(); pagerank(plist, ncores, npages, nedges, dampener); double end = omp_get_wtime(); printf("%lf\n", end - start); /* clean up the memory used by the list of pages */ page_list_destroy(plist); return 0; }
kernel_cpu.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } // #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <omp.h> // (in directory known to compiler) needed by openmp #include <stdlib.h> // (in directory known to compiler) needed by malloc #include <stdio.h> // (in directory known to compiler) needed by printf, stderr #include <assert.h> //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include 
"common.h" // (in directory provided here) //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "timer.h" // (in directory provided here) //========================================================================================================================================================================================================200 // KERNEL_CPU FUNCTION //========================================================================================================================================================================================================200 void kernel_cpu( int cores_arg, record *records, knode *knodes, long knodes_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // timer long long time0; long long time1; long long time2; time0 = get_time(); //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; { time1 = get_time(); //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; // process number of querries { const unsigned long long parallel_for_start = current_time_ns(); #pragma omp parallel for private (i, thid) for(bid = 0; bid < count; bid++){ // process levels of the tree for(i = 0; i < maxheight; i++){ // process all leaves at each level for(thid = 0; thid < threadsPerBlock; thid++){ // if value is between the two keys if((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid+1] > keys[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset[bid]" calculated below that addresses knodes[] in the next iteration goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodes[offset[bid]].indices[thid] < knodes_elem){ offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } //At this point, we have a candidate leaf node which may contain //the target record. 
Check each key to hopefully find the record // process all leaves at each level for(thid = 0; thid < threadsPerBlock; thid++){ if(knodes[currKnode[bid]].keys[thid] == keys[bid]){ ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } ; const unsigned long long parallel_for_end = current_time_ns(); printf("pragma83_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); } time2 = get_time(); } //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of CPU/MCPU KERNEL:\n"); printf("%15.12f s, %15.12f % : MCPU: SET DEVICE\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100); printf("%15.12f s, %15.12f % : CPU/MCPU: KERNEL\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time2-time0) / 1000000); } //========================================================================================================================================================================================================200 // END //========================================================================================================================================================================================================200 // #ifdef __cplusplus // } // #endif
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include "http_stream.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern int check_mistakes;

// Alphabet size for captcha labels: 26 letters + 10 digits + 1 "blank" slot.
#define NUMCHARS 37

// Guards shared RNG / path-selection state across data-loading threads.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

// Reads one path per line from `filename` into a newly allocated list.
// Exits via file_error() if the file cannot be opened.
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = random_gen()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

// Picks n paths out of m as `mini_batch` interleaved "time lines": each time
// line starts at a random index and advances by a random stride (for video /
// sequence training). With contrastive set, odd slots reuse the previous
// slot's start so sample pairs come from the same sequence.
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed, int contrastive)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        if (contrastive && (i % 2) == 1) start_time_indexes[i] = start_time_indexes[i - 1];
        else start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }
    for (i = 0; i < n; ++i) {
        do {
            // round-robin over the mini_batch time lines, advancing each by `speed`
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //printf(" index = %d, ", index);
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}

// Picks n random paths out of m (with replacement). With contrastive set,
// odd slots repeat the previous pick so samples come in identical pairs.
// Empty paths are re-drawn; suspiciously short ones are only warned about.
char **get_random_paths_custom(char **paths, int n, int m, int contrastive)
{
    char** random_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    int old_index = 0;
    //printf("n = %d \n", n);
    for(i = 0; i < n; ++i){
        do {
            int index = random_gen() % m;
            if (contrastive && (i % 2 == 1)) index = old_index;
            else old_index = index;
            random_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf("grp: %s\n", paths[index]);
            if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]);
        } while (strlen(random_paths[i]) == 0);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

// Convenience wrapper: random path selection without contrastive pairing.
char **get_random_paths(char **paths, int n, int m)
{
    return get_random_paths_custom(paths, n, m, 0);
}

// Returns n new strings with `find` replaced by `replace` in each path.
// Caller owns the returned array and its strings.
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char** replace_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

// Loads n images resized to w x h and converts them to grayscale; each row
// of the returned matrix is one flattened image.
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(im);
        free_image(im);
        im = gray;
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Loads n color images resized to w x h; each row of the returned matrix is
// one flattened image (the matrix takes ownership of the pixel buffers).
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Loads n images with random augmentation (crop/flip/HSV distortion) and
// resizes to w x h. With contrastive set, consecutive pairs load the same
// source image (i/2) so they differ only by augmentation.
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float
exposure, int dontuse_opencv, int contrastive)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        int size = w > h ? w : h;
        image im;
        const int img_index = (contrastive) ? (i / 2) : i;
        if(dontuse_opencv) im = load_image_stb_resize(paths[img_index], 0, 0, 3);
        else im = load_image_color(paths[img_index], 0, 0);

        image crop = random_augment_image(im, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        image sized = resize_image(crop, w, h);

        //show_image(im, "orig");
        //show_image(sized, "sized");
        //show_image(sized, paths[img_index]);
        //wait_until_press_key_cv();
        //printf("w = %d, h = %d \n", sized.w, sized.h);

        free_image(im);
        free_image(crop);
        X.vals[i] = sized.data;
        X.cols = sized.h*sized.w*sized.c;
    }
    return X;
}

// Parses a label file of "id x y w h" lines into a malloc'ed box_label array,
// filling derived left/right/top/bottom edges and a per-image track_id hash.
// On missing file: logs to bad.list, sets *n = 0, returns a 1-element buffer.
// NOTE(review): the fopen("bad.list", "a") result `fw` is not NULL-checked
// before fwrite/fclose — crashes if the working directory is unwritable.
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        FILE* fw = fopen("bad.list", "a");
        fwrite(filename, sizeof(char), strlen(filename), fw);
        char *new_line = "\n";
        fwrite(new_line, sizeof(char), strlen(new_line), fw);
        fclose(fw);
        if (check_mistakes) {
            printf("\n Error in read_boxes() \n");
            getchar();
        }
        *n = 0;
        return boxes;
    }
    const int max_obj_img = 4000;// 30000;
    // base track id derived from the filename so ids are stable per image
    const int img_hash = (custom_hash(filename) % max_obj_img)*max_obj_img;
    //printf(" img_hash = %d, filename = %s; ", img_hash, filename);
    float x, y, h, w;
    int id;
    int count = 0;
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label));
        boxes[count].track_id = count + img_hash;
        //printf(" boxes[count].track_id = %d, count = %d \n", boxes[count].track_id, count);
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

// Fisher-Yates-style shuffle of the box array (with replacement of indices,
// so not a uniform permutation — preserved as-is).
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = random_gen()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

// Remaps box coordinates through the crop/scale transform (sx, sy, dx, dy),
// applies horizontal flip, clamps edges to [0,1] and recomputes x/y/w/h.
// Boxes at the origin or fully outside the image are marked with 999999 so
// downstream consumers can skip them.
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

// Fills a SWAG-format truth array: up to 30 boxes, (4+classes) floats each
// (x, y, w, h, then a one-hot class vector).
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 30; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

// Fills a region-layer truth grid: the image is divided into num_boxes x
// num_boxes cells; each cell stores (objectness, one-hot class, x, y, w, h)
// for at most one box (first box to claim a cell wins).
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .001 || h < .001) continue;

        // cell coordinates and the box center offset within its cell
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] =
x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

// Loads and validates detection labels for one image, writing up to num_boxes
// rows of `truth_size` floats: x, y, w, h, class id, track id. Malformed
// boxes are skipped (counted in `sub` so output rows stay dense) and appended
// to bad_label.list via the shell. Returns the smallest box side in pixels.
// NOTE(review): each bad label runs sprintf into a 256-byte `buff` from a
// labelpath of up to 4096 bytes (potential overflow) and passes it to
// system() — the path is interpolated unescaped into a shell command.
int fill_truth_detection(const char *path, int num_boxes, int truth_size, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        int track_id = boxes[i].track_id;

        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            if (check_mistakes) getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        // 999999 is the sentinel set by correct_boxes() for invalid boxes
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;

        truth[(i-sub)*truth_size +0] = x;
        truth[(i-sub)*truth_size +1] = y;
        truth[(i-sub)*truth_size +2] = w;
        truth[(i-sub)*truth_size +3] = h;
        truth[(i-sub)*truth_size +4] = id;
        truth[(i-sub)*truth_size +5] = track_id;
        //float val = track_id;
        //printf(" i = %d, sub = %d, truth_size = %d, track_id = %d, %f, %f\n", i, sub, truth_size, track_id, truth[(i - sub)*truth_size + 5], val);

        // track the smallest box dimension seen, in network pixels
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}

// Prints the most likely character for each of n NUMCHARS-wide predictions.
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

// Builds a one-hot captcha truth vector from the characters of the filename
// (text up to the first '.'); unused positions get the NUMCHARS-1 blank.
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

// Loads a captcha classification batch: images in d.X, per-character one-hot
// labels (k characters) in d.y. With m > 0, paths are sampled randomly.
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

// Loads images for autoencoding: d.y aliases d.X (input is its own target).
// NOTE(review): d.X.cols is overwritten with the constant 17100 — presumably
// tied to a fixed input size elsewhere; confirm before reuse.
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

// One-hot truth from substring matching: truth[i] = 1 when labels[i] occurs
// in the path. Warns (and lists matches) if not exactly one label matched.
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

// Like fill_truth() but with label smoothing: the matching class gets
// 1 - eps, every other class gets eps / (k - 1).
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
    int i;
    memset(truth, 0, k * sizeof(float));
    int count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = (1 - label_smooth_eps);
            ++count;
        }
        else {
            truth[i] = label_smooth_eps / (k - 1);
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

// Propagates truth up a label hierarchy (every ancestor of a set label is
// set), then marks fully-empty sibling groups with SECRET_NUM so the loss
// can ignore them.
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

// Index of the largest element of arr.
// NOTE(review): max starts at 0, so for an all-negative array this returns 0
// regardless of where the true maximum is. Only used in a debug printf here.
int find_max(float *arr, int size)
{
    int i;
    float max = 0;
    int n = 0;
    for (i = 0; i < size; ++i) {
        if (arr[i] > max) {
            max = arr[i];
            n = i;
        }
    }
    return n;
}

// Builds an n x k label matrix. With labels: smoothed substring-match truth
// (plus optional hierarchy propagation). Without labels (unsupervised):
// a pseudo-class derived from the path pointer value.
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps, int contrastive)
{
    matrix y = make_matrix(n, k);
    int i;
    if (labels) {
        // supervised learning
        for (i = 0; i < n; ++i) {
            const int img_index = (contrastive) ? (i / 2) : i;
            fill_truth_smooth(paths[img_index], labels, k, y.vals[i], label_smooth_eps);
            //printf(" n = %d, i = %d, img_index = %d, class_id = %d \n", n, i, img_index, find_max(y.vals[i], k));
            if (hierarchy) {
                fill_hierarchy(y.vals[i], k, hierarchy);
            }
        }
    }
    else {
        // unsupervised learning
        for (i = 0; i < n; ++i) {
            const int img_index = (contrastive) ?
(i / 2) : i;
            // pseudo-label: derive a stable class id from the path pointer
            const uintptr_t path_p = (uintptr_t)paths[img_index];// abs(random_gen());
            const int class_id = path_p % k;
            int l;
            for (l = 0; l < k; ++l) y.vals[i][l] = 0;
            y.vals[i][class_id] = 1;
        }
    }
    return y;
}

// Builds an n x k multi-hot tag matrix by reading "<tag-id>" integers from a
// per-image label file (path derived from the image path; falls back from
// "labels" to "labels2"). Images with no label file contribute all zeros.
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

// Reads label names (one per line) from `filename`; optionally reports the
// count through *size. Caller owns the returned array.
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    if(size) *size = plist->size;
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

// Convenience wrapper over get_labels_custom() when the count isn't needed.
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}

// Frees a data batch. Shallow batches only own the row-pointer arrays; deep
// batches own the row data too.
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }
    else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

// Loads a region-training batch of n random images: random jittered crop,
// random flip, HSV distortion, resize to w x h, with matching grid truth
// built by fill_truth_region().
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        // random crop bounds proportional to jitter
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

// Loads n image pairs side by side (6 channels) with per-class IoU labels
// read from per-image label files; pairs are resolved to hard 0/1 targets
// where one side clearly wins, otherwise SECRET_NUM (ignore).
// NOTE(review): fp1/fp2 from fopen() are not NULL-checked before fscanf —
// a missing label file crashes here.
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;

    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");

        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 &&  d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

// Loads a single random image at native size with jittered crop + flip and
// SWAG-format truth (see fill_truth_swag); batch of one.
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);

    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

// Appends old_truth boxes after the existing new_truth boxes (for mixup),
// stopping at capacity or at the first empty (x == 0) slot on either side.
// Only the first 5 fields (x, y, w, h, id) are copied per box.
void blend_truth(float *new_truth, int boxes, int truth_size, float *old_truth)
{
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*truth_size];
        if (!x) break;
        count_new_truth++;
    }
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*truth_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] = old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

// Merges one source image's boxes into a 4-way mosaic truth array: shifts
// box coordinates into the quadrant selected by i_mixup, then either clips
// them to the quadrant bounds (mosaic_bound) or to the full image, keeping
// only boxes that remain fully inside with sane sizes.
void blend_truth_mosaic(float *new_truth, int boxes, int truth_size, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
    int left_shift, int right_shift, int top_shift, int bot_shift,
    int net_w, int net_h, int mosaic_bound)
{
    const float lowest_w = 1.F / net_w;
    const float lowest_h = 1.F / net_h;
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*truth_size];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*truth_size;
        new_truth_ptr[0] = 0;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];

        // shift 4 images: translate the box into its mosaic quadrant
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }

        int left = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top = (yb - hb / 2)*h;
        int bot = (yb + hb / 2)*h;

        if(mosaic_bound)
        {
            // fix out of Mosaic-bound: clamp the box to its quadrant
            float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0;
            if (i_mixup == 0) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 1) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 2) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = cut_y;
                bot_bound = h;
            }
            if (i_mixup == 3) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = cut_y;
                bot_bound = h;
            }

            if (left < left_bound) {
                //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound);
                left = left_bound;
            }
            if (right > right_bound) {
                //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound);
                right = right_bound;
            }
            if (top < top_bound) top = top_bound;
            if (bot > bot_bound) bot = bot_bound;

            xb = ((float)(right + left) /
2) / w; wb = ((float)(right - left)) / w; yb = ((float)(bot + top) / 2) / h; hb = ((float)(bot - top)) / h; } else { // fix out of bound if (left < 0) { float diff = (float)left / w; xb = xb - diff / 2; wb = wb + diff; } if (right > w) { float diff = (float)(right - w) / w; xb = xb - diff / 2; wb = wb - diff; } if (top < 0) { float diff = (float)top / h; yb = yb - diff / 2; hb = hb + diff; } if (bot > h) { float diff = (float)(bot - h) / h; yb = yb - diff / 2; hb = hb - diff; } left = (xb - wb / 2)*w; right = (xb + wb / 2)*w; top = (yb - hb / 2)*h; bot = (yb + hb / 2)*h; } // leave only within the image if(left >= 0 && right <= w && top >= 0 && bot <= h && wb > 0 && wb < 1 && hb > 0 && hb < 1 && xb > 0 && xb < 1 && yb > 0 && yb < 1 && wb > lowest_w && hb > lowest_h) { new_truth_ptr[0] = xb; new_truth_ptr[1] = yb; new_truth_ptr[2] = wb; new_truth_ptr[3] = hb; new_truth_ptr[4] = old_truth_ptr[4]; new_t++; } } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs) { const int random_index = random_gen(); c = c ? 
c : 3; if (use_mixup == 2 || use_mixup == 4) { printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n"); if (check_mistakes) getchar(); if(use_mixup == 2) use_mixup = 0; else use_mixup = 3; } if (use_mixup == 3 && letter_box) { //printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); //if (check_mistakes) getchar(); //exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0, gaussian_noise = 0; d.y = make_matrix(n, truth_size*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { printf("\n Error in load_data_detection() - OpenCV \n"); fflush(stdout); if (check_mistakes) { getchar(); } continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if 
(resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; // < 0 int min_rdh = oh*(1 - (1 / resize_down)) / 2; // < 0 if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; // > 0 int max_rdh = oh*(1 - (1 / resize_up)) / 2; // > 0 //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2); if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? random_gen() % 2 : 0; } r_scale = random_float(); if (!contrastive || contrastive_color || i % 2 == 0) { dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); } if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise; else gaussian_noise = 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float 
scale = rand_precalc_random(.25, 2, r_scale); // unused currently //printf(" letter_box = %d \n", letter_box); if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); } // move each 2nd image to the corner - so that most of it was visible if (use_mixup == 3 && random_gen() % 2 == 0) { if (flip) { if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; } else { if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0; } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = 
fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); //for (int z = 0; z < boxes; ++z) if(truth[z*truth_size] > 0) printf(" track_id = %f \n", truth[z*truth_size + 5]); //printf(" truth_size = %d \n", truth_size); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, boxes, truth_size, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth_size, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 
cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 && y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth_size, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift, w, h, mosaic_bound); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); //assert(use_mixup < 2); if (use_mixup == 2) { printf("\n cutmix=1 - isn't supported for Detector \n"); exit(0); } if (use_mixup == 3 || use_mixup == 4) { printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n"); exit(0); } int mixup = use_mixup ? 
random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, truth_size * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if (resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; int min_rdh = oh*(1 - (1 / resize_down)) / 2; if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; int max_rdh = oh*(1 - (1 / resize_up)) / 2; if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? 
random_gen() % 2 : 0; } r_scale = random_float(); if (!contrastive || contrastive_color || i % 2 == 0) { dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); } } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = 
resize_image(cropped, w, h); if (flip) flip_image(sized); distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, truth_size, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv, a.contrastive); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.truth_size, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.mosaic_bound, a.contrastive, a.contrastive_jit_flip, a.contrastive_color, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = 
letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } static const int thread_wait_ms = 5; static volatile int flag_exit; static volatile int * run_load_data = NULL; static load_args * args_swap = NULL; static pthread_t* threads = NULL; pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER; void *run_thread_loop(void *ptr) { const int i = *(int *)ptr; while (!custom_atomic_load_int(&flag_exit)) { while (!custom_atomic_load_int(&run_load_data[i])) { if (custom_atomic_load_int(&flag_exit)) { free(ptr); return 0; } this_thread_sleep_for(thread_wait_ms); } pthread_mutex_lock(&mtx_load_data); load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args)); *args_local = args_swap[i]; pthread_mutex_unlock(&mtx_load_data); load_thread(args_local); custom_atomic_store_int(&run_load_data[i], 0); } free(ptr); return 0; } void *load_threads(void *ptr) { //srand(time(0)); int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data* buffers = (data*)xcalloc(args.threads, sizeof(data)); if (!threads) { threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t)); run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int)); args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args)); fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads); for (i = 0; i < args.threads; ++i) { int* ptr = (int*)xcalloc(1, sizeof(int)); *ptr = i; if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed"); } } for (i = 0; i < 
args.threads; ++i) { args.d = buffers + i; args.n = (i + 1) * total / args.threads - i * total / args.threads; pthread_mutex_lock(&mtx_load_data); args_swap[i] = args; pthread_mutex_unlock(&mtx_load_data); custom_atomic_store_int(&run_load_data[i], 1); // run thread } for (i = 0; i < args.threads; ++i) { while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms); // join } /* pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } */ *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); //free(threads); return 0; } void free_load_threads(void *ptr) { load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; int i; if (threads) { custom_atomic_store_int(&flag_exit, 1); for (i = 0; i < args.threads; ++i) { pthread_join(threads[i], 0); } free((void*)run_load_data); free(args_swap); free(threads); threads = NULL; custom_atomic_store_int(&flag_exit, 0); } } pthread_t load_data(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int 
m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv, int contrastive) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d.y = load_labels_paths(paths, n, labels, k, hierarchy, 
label_smooth_eps, contrastive); if (use_mixup && rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; 
assert(cut_x >= 0 && cut_x <= w); assert(cut_y >= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 4 == 0) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) 
ksize = 15; image blurred = blur_image(im, ksize); free_image(im); d.X.vals[i] = blurred.data; //if (i == 0) { // show_image(im, "Not blurred"); // show_image(blurred, "blurred"); // wait_until_press_key_cv(); //} } } } #endif // OPENCV if (show_imgs) { int i, j; for (i = 0; i < d.X.rows; ++i) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; char buff[1000]; sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen()); save_image(im, buff); char buff_string[1000]; sprintf(buff_string, "\n Classes: "); for (j = 0; j < d.y.cols; ++j) { if (d.y.vals[i][j] > 0) { char buff_tmp[100]; sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]); strcat(buff_string, buff_tmp); } } printf("%s \n", buff_string); if (show_imgs == 1) { show_image(im, buff); wait_until_press_key_cv(); } } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n"); } if (m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); 
out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
/* ===== DRB045-doall1-orig-no.c ===== */
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* Simplest one dimension array computation */

/* Fix: printf() was called with no declaration in scope; this translation
 * unit had no includes at all, and implicit function declarations are
 * invalid since C99. Include <stdio.h> explicitly. */
#include <stdio.h>

int a[100];

/*
 * Fills a[i] = i, then increments every element -- both loops run as
 * OpenMP parallel-for with i private; each iteration writes a distinct
 * element of `a`, so the parallel regions are race-free. Finally prints
 * the array with a sequential loop so output lines stay in order.
 * Returns 0.
 */
int main()
{
    int i;

    /* a[i] = i */
    #pragma cetus private(i)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i = 0; i < 100; i++) {
        a[i] = i;
    }

    /* a[i] += 1 */
    #pragma cetus private(i)
    #pragma loop name main#1
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i = 0; i < 100; i++) {
        a[i] = (a[i] + 1);
    }

    /* sequential output loop */
    #pragma cetus private(i)
    #pragma loop name main#2
    for (i = 0; i < 100; i++) {
        printf("%d\n", a[i]);
    }

    return 0;
}
/* ===== implicitmidpoint.c ===== */
/* Generated by Cython 0.29.6 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [ "/home/matt/miniconda3/envs/dapy3/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h", "/home/matt/miniconda3/envs/dapy3/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h" ], "extra_compile_args": [ "-fopenmp" ], "extra_link_args": [ "-fopenmp" ], "include_dirs": [ "/home/matt/miniconda3/envs/dapy3/lib/python3.7/site-packages/numpy/core/include" ], "name": "dapy.integrators.implicitmidpoint", "sources": [ "dapy/integrators/implicitmidpoint.pyx" ] }, "module_name": "dapy.integrators.implicitmidpoint" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_6" #define CYTHON_HEX_VERSION 0x001D06F0 #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef 
CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define 
CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define 
CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> 
#endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 
#endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current 
_PyThreadState_Current
#endif
/* Compatibility shim (Cython-generated): provide the PyThread_tss_*()
   thread-specific-storage API, introduced in CPython 3.7, on top of the
   legacy PyThread_*_key() calls for interpreters that lack it. */
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
/* Create a TSS key; the legacy API reports no failure, so always return 0. */
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
  *key = PyThread_create_key();
  return 0;
}
/* Heap-allocate a key in the "needs init" state.
   NOTE(review): the PyObject_Malloc() result is dereferenced without a
   NULL check -- upstream generated-code issue, left unchanged here. */
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
  Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
  *key = Py_tss_NEEDS_INIT;
  return key;
}
/* Release a key obtained from PyThread_tss_alloc(). */
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
  PyObject_Free(key);
}
/* A key is "created" once it no longer holds the Py_tss_NEEDS_INIT marker. */
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
  return *key != Py_tss_NEEDS_INIT;
}
/* Delete the key and reset it to the "needs init" marker. */
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
  PyThread_delete_key(*key);
  *key = Py_tss_NEEDS_INIT;
}
/* Store a value under the key; forwards the legacy API's status code. */
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
  return PyThread_set_key_value(*key, value);
}
/* Fetch the calling thread's value for the key. */
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
  return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ?
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef 
PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__dapy__integrators__implicitmidpoint #define __PYX_HAVE_API__dapy__integrators__implicitmidpoint /* Early includes */ #include <string.h> #include <stdio.h> #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include <math.h> #include "pythread.h" 
#include <stdlib.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) 
__Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x))
/* Python 2 + c_string_encoding=ascii: record whether
   sys.getdefaultencoding() differs from ASCII, and reject any default
   encoding that is not a byte-for-byte superset of ASCII. */
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        /* Round-trip all 128 ASCII code points through the default
           encoding and require a byte-identical result. */
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
/* c_string_encoding=default: capture sys.getdefaultencoding() into a
   malloc()ed C string consumed by the decode macro above.
   NOTE(review): the buffer is never freed -- presumably intentional,
   as it lives for the whole process. */
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
/* Deliberate no-op; the name suggests it exists to convince the compiler
   a variable has been initialized -- confirm against Cython sources. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
/* Module-level state shared by the generated module code. */
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
/* Decide whether C complex-number support is available/usable. */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
/* Source file names indexed by the __PYX_ERR(f_index, ...) macro. */
static const char *__pyx_f[] = {
  "dapy/integrators/implicitmidpoint.pyx",
  "stringsource",
  "__init__.pxd",
  "type.pxd",
};
/* MemviewSliceStruct.proto */
struct
/* NOTE(review): Cython-GENERATED runtime support; comments added for readers only. */
__pyx_memoryview_obj;
/* A typed-memoryview slice: a pointer to the owning memoryview object plus
 * raw data pointer and per-dimension shape/strides/suboffsets (up to 8 dims,
 * the generated buffer max). */
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
/* len() of a memoryview slice is its first-dimension extent. */
#define __Pyx_MemoryView_Len(m)  (m.shape[0])

/* Atomics.proto */
/* Selects an atomic increment/decrement implementation for the memoryview
 * acquisition count; falls back to the lock-based *_locked helpers when no
 * suitable atomics are available. The MSVC and Intel branches are disabled
 * by the trailing "&& 0" in the generated #elif conditions. */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* NOTE(review): the guard below spells __GNUC_PATCHLEVEL without the trailing
 * "__" — an undefined identifier that the preprocessor evaluates as 0. A known
 * quirk of this Cython version; left untouched (generated code). */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
/* Bump/drop the memoryview's slice acquisition count — atomically when
 * possible, otherwise under the memoryview's pythread lock. */
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif

/* NoFastGil.proto */
/* Fast-GIL support disabled: map the helpers to the plain PyGILState API. */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()

/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif

/* BufferFormatStructs.proto */
/* Compile-time check: does an unsigned all-ones pattern compare > 0? */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Describes one C type for buffer-format (PEP 3118) parsing: its fields
 * (for structs), size, array extents, dimensionality, and type group. */
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
/* One named member of a struct-typed buffer element. */
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
/* Stack element while recursing into nested struct fields. */
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
/* Parser state for matching a buffer's format string against __Pyx_TypeInfo. */
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;

/* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
 * # in Cython to enable them only on the right systems.
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":779 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":784 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":785 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":800 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":804 * ctypedef 
npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":805 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":806 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":808 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":809 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":811 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":812 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef 
npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator; struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":817 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
 * ctypedef npy_clongdouble clongdouble_t
 *
 * ctypedef npy_cdouble complex_t             # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew1(a):
 */
typedef npy_cdouble __pyx_t_5numpy_complex_t;

/* NOTE(review): everything below is Cython-GENERATED extension-type layout;
 * comments added for readers only — regenerate rather than hand-edit. */

/* "dapy/integrators/implicitmidpoint.pxd":1
 * cdef class ImplicitMidpointIntegrator:             # <<<<<<<<<<<<<<
 *
 *     cdef double dt, tol
 */
/* C object layout of the ImplicitMidpointIntegrator cdef class: standard
 * Python object header, the vtable pointer for its cdef methods, the scalar
 * settings (step size dt, convergence tol, state dim, counters), and four
 * memoryview-slice work buffers declared in the .pxd. */
struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator {
  PyObject_HEAD
  struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_vtab;
  double dt;
  double tol;
  int dim_z;
  int n_steps_per_update;
  int max_iters;
  int n_threads;
  __Pyx_memviewslice intervals;
  __Pyx_memviewslice z_temp;
  __Pyx_memviewslice z_half;
  __Pyx_memviewslice dz_dt;
};

/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
/* Object layout of Cython's owning array type (cython.view.array):
 * raw buffer, extents/strides, element size, and ownership flags. */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab;
  char *data;
  Py_ssize_t len;
  char *format;
  int ndim;
  Py_ssize_t *_shape;
  Py_ssize_t *_strides;
  Py_ssize_t itemsize;
  PyObject *mode;
  PyObject *_format;
  void (*callback_free_data)(void *);
  int free_data;
  int dtype_is_object;
};

/* "View.MemoryView":279
 *
 * @cname('__pyx_MemviewEnum')
 * cdef class Enum(object):             # <<<<<<<<<<<<<<
 *     cdef object name
 *     def __init__(self, name):
 */
/* Named sentinel object (used for the generic/strided/indirect markers). */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;
};

/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
/* Object layout of Cython's memoryview wrapper: the exporting object, a
 * Py_buffer view, a pythread lock plus the acquisition counters used by the
 * atomics machinery above, and the element __Pyx_TypeInfo. */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab;
  PyObject *obj;
  PyObject *_size;
  PyObject *_array_interface;
  PyThread_type_lock lock;
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;
  Py_buffer view;
  int flags;
  int dtype_is_object;
  __Pyx_TypeInfo *typeinfo;
};

/* "View.MemoryView":961
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
/* Subclass of memoryview used to hand a C-level slice back to Python;
 * embeds the base object plus the slice and element conversion hooks. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;
  __Pyx_memviewslice from_slice;
  PyObject *from_object;
  PyObject *(*to_object_func)(char *);
  int (*to_dtype_func)(char *, PyObject *);
};

/* "dapy/integrators/implicitmidpoint.pyx":12
 * """Raised when implicit integrator step fails to converge."""
 *
 * cdef class ImplicitMidpointIntegrator:             # <<<<<<<<<<<<<<
 *
 *     def __init__(self, int dim_z, double dt, double tol, int max_iters,
 */
/* Vtable for the integrator's cdef methods: the RHS evaluator update_dz_dt,
 * the fixed-point implicit_midpoint_step (int return — presumably a
 * success/convergence status; confirm against the .pyx), and
 * partition_particles. */
struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator {
  void (*update_dz_dt)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, __Pyx_memviewslice, double, __Pyx_memviewslice);
  int (*implicit_midpoint_step)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, __Pyx_memviewslice, double, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice);
  void (*partition_particles)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, int);
};
static struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_vtabptr_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator;

/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
/* Vtable for the array type (single cdef method). */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;

/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
/* Vtable for the memoryview type's cdef slicing/element-access methods. */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;

/* "View.MemoryView":961
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
/* _memoryviewslice adds no new cdef methods; its vtable just embeds the base. */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;

/* --- Runtime support code (head) --- */
/* Refnanny.proto */
/* Optional refcount-debugging hooks; compiled out unless CYTHON_REFNANNY. */
#ifndef CYTHON_REFNANNY
  #define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
  typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
  } __Pyx_RefNannyAPIStruct;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
  /* When asked, briefly take the GIL just to set up the refnanny context. */
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          if (acquire_gil) {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
              PyGILState_Release(__pyx_gilstate_save);\
          } else {\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
          }
#else
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
  #define __Pyx_RefNannyFinishContext()\
          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* 
RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, 
have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* 
__Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* 
RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); #define __Pyx_PyObject_Dict_GetItem(obj, name)\ (likely(PyDict_CheckExact(obj)) ?\ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if 
CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); 
} /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else 
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define 
__Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* CalculateMetaclass.proto */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); /* Py3ClassCreate.proto */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc); static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* IsLittleEndian.proto */ static CYTHON_INLINE int 
__Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) 
((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define 
__Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int 
__Pyx_InitStrings(__Pyx_StringTabEntry *t); static void __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_update_dz_dt(CYTHON_UNUSED struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_z, CYTHON_UNUSED double __pyx_v_t, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_dz_dt); /* proto*/ static int __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_implicit_midpoint_step(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, __Pyx_memviewslice __pyx_v_z, double __pyx_v_time, __Pyx_memviewslice __pyx_v_dz_dt, __Pyx_memviewslice __pyx_v_z_half, __Pyx_memviewslice __pyx_v_z_next); /* proto*/ static void __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_partition_particles(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, int __pyx_v_n_particles); /* proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject 
*__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'dapy.integrators.implicitmidpoint' */ static PyTypeObject *__pyx_ptype_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator = 0; static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static 
PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static PyObject *__pyx_f_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator__set_state(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, PyObject *); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ 
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "dapy.integrators.implicitmidpoint" extern int __pyx_module_is_main_dapy__integrators__implicitmidpoint; int __pyx_module_is_main_dapy__integrators__implicitmidpoint = 0; /* Implementation of 'dapy.integrators.implicitmidpoint' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_dt[] = "dt"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_doc[] = "__doc__"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_tol[] = "tol"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dim_z[] = "dim_z"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_empty[] = "empty"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] 
= "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_double[] = "double"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_module[] = "__module__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_n_steps[] = "n_steps"; static const char __pyx_k_prepare[] = "__prepare__"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_qualname[] = "__qualname__"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_max_iters[] = "max_iters"; static const char __pyx_k_metaclass[] = "__metaclass__"; static const char __pyx_k_n_threads[] = "n_threads"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_z_particles[] = "z_particles"; static const 
char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_ConvergenceError[] = "ConvergenceError"; static const char __pyx_k_start_time_index[] = "start_time_index"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_ImplicitMidpointIntegrator[] = "ImplicitMidpointIntegrator"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_pyx_unpickle_ImplicitMidpointI[] = 
"__pyx_unpickle_ImplicitMidpointIntegrator"; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Raised_when_implicit_integrator[] = "Raised when implicit integrator step fails to converge."; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Convergence_error_in_implicit_mi[] = "Convergence error in implicit midpoint step."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Implicit_mid_point_integrator_fo[] = "Implicit mid-point integrator for ordinary differential equations."; static const char __pyx_k_Incompatible_checksums_s_vs_0x85[] = "Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = 
"Non-native byte order not supported"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_dapy_integrators_implicitmidpoin[] = "dapy.integrators.implicitmidpoint"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_ConvergenceError; static PyObject *__pyx_kp_u_Convergence_error_in_implicit_mi; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImplicitMidpointIntegrator; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x85; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject 
*__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_kp_s_Raised_when_implicit_integrator; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dapy_integrators_implicitmidpoin; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dim_z; static PyObject *__pyx_n_s_doc; static PyObject *__pyx_n_u_double; static PyObject *__pyx_n_s_dt; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_empty; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_u_int32; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_max_iters; 
static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_metaclass; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_module; static PyObject *__pyx_n_s_n_steps; static PyObject *__pyx_n_s_n_threads; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_prepare; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_unpickle_ImplicitMidpointI; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_qualname; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_start_time_index; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static 
PyObject *__pyx_n_s_tol; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_z_particles; static int __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator___init__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, int __pyx_v_dim_z, double __pyx_v_dt, double __pyx_v_tol, int __pyx_v_max_iters, int __pyx_v_n_threads); /* proto */ static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_2forward_integrate(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, __Pyx_memviewslice __pyx_v_z_particles, int __pyx_v_start_time_index, int __pyx_v_n_steps); /* proto */ static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_4__reduce_cython__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_6__setstate_cython__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static int 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj 
*__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_139854930; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__22; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static 
PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__33; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__34; /* Late includes */ /* "dapy/integrators/implicitmidpoint.pyx":14 * cdef class ImplicitMidpointIntegrator: * * def __init__(self, int dim_z, double dt, double tol, int max_iters, # <<<<<<<<<<<<<< * int n_threads=4): * self.dim_z = dim_z */ /* Python wrapper */ static int __pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_dim_z; double __pyx_v_dt; double __pyx_v_tol; int __pyx_v_max_iters; int __pyx_v_n_threads; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dim_z,&__pyx_n_s_dt,&__pyx_n_s_tol,&__pyx_n_s_max_iters,&__pyx_n_s_n_threads,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dim_z)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; 
CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dt)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 1); __PYX_ERR(0, 14, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tol)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 2); __PYX_ERR(0, 14, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_iters)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 3); __PYX_ERR(0, 14, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n_threads); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 14, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_dim_z = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_dim_z == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error) __pyx_v_dt = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dt == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error) __pyx_v_tol = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_tol == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error) __pyx_v_max_iters = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_max_iters == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error) if (values[4]) { __pyx_v_n_threads = 
__Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_n_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15, __pyx_L3_error) } else { __pyx_v_n_threads = ((int)4); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 14, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator___init__(((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self), __pyx_v_dim_z, __pyx_v_dt, __pyx_v_tol, __pyx_v_max_iters, __pyx_v_n_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator___init__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, int __pyx_v_dim_z, double __pyx_v_dt, double __pyx_v_tol, int __pyx_v_max_iters, int __pyx_v_n_threads) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_RefNannySetupContext("__init__", 0); /* "dapy/integrators/implicitmidpoint.pyx":16 * def __init__(self, int dim_z, double dt, double tol, int max_iters, * int n_threads=4): * self.dim_z = dim_z # <<<<<<<<<<<<<< * self.dt = dt * self.tol = tol */ __pyx_v_self->dim_z = __pyx_v_dim_z; /* "dapy/integrators/implicitmidpoint.pyx":17 * int n_threads=4): * self.dim_z = dim_z * self.dt = dt # <<<<<<<<<<<<<< * self.tol = tol * self.max_iters = max_iters */ __pyx_v_self->dt = __pyx_v_dt; 
/* "dapy/integrators/implicitmidpoint.pyx":18 * self.dim_z = dim_z * self.dt = dt * self.tol = tol # <<<<<<<<<<<<<< * self.max_iters = max_iters * self.n_threads = n_threads */ __pyx_v_self->tol = __pyx_v_tol; /* "dapy/integrators/implicitmidpoint.pyx":19 * self.dt = dt * self.tol = tol * self.max_iters = max_iters # <<<<<<<<<<<<<< * self.n_threads = n_threads * self.intervals = np.empty((n_threads,), dtype='int32') */ __pyx_v_self->max_iters = __pyx_v_max_iters; /* "dapy/integrators/implicitmidpoint.pyx":20 * self.tol = tol * self.max_iters = max_iters * self.n_threads = n_threads # <<<<<<<<<<<<<< * self.intervals = np.empty((n_threads,), dtype='int32') * self.z_temp = np.empty((n_threads, dim_z), dtype='double') */ __pyx_v_self->n_threads = __pyx_v_n_threads; /* "dapy/integrators/implicitmidpoint.pyx":21 * self.max_iters = max_iters * self.n_threads = n_threads * self.intervals = np.empty((n_threads,), dtype='int32') # <<<<<<<<<<<<<< * self.z_temp = np.empty((n_threads, dim_z), dtype='double') * self.z_half = np.empty((n_threads, dim_z), dtype='double') */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_n_threads); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_u_int32) < 0) __PYX_ERR(0, 21, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_self->intervals, 0); __pyx_v_self->intervals = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":22 * self.n_threads = n_threads * self.intervals = np.empty((n_threads,), dtype='int32') * self.z_temp = np.empty((n_threads, dim_z), dtype='double') # <<<<<<<<<<<<<< * self.z_half = np.empty((n_threads, dim_z), dtype='double') * self.dz_dt = np.empty((n_threads, dim_z), dtype='double') */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_n_threads); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_dim_z); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_u_double) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_self->z_temp, 0); __pyx_v_self->z_temp = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":23 * self.intervals = np.empty((n_threads,), dtype='int32') * self.z_temp = np.empty((n_threads, dim_z), dtype='double') * self.z_half = np.empty((n_threads, dim_z), dtype='double') # <<<<<<<<<<<<<< * self.dz_dt = np.empty((n_threads, dim_z), dtype='double') * */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_n_threads); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_dim_z); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_u_double) < 0) __PYX_ERR(0, 23, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_self->z_half, 0); __pyx_v_self->z_half = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":24 * self.z_temp = np.empty((n_threads, dim_z), dtype='double') * self.z_half = np.empty((n_threads, dim_z), dtype='double') * self.dz_dt = np.empty((n_threads, dim_z), dtype='double') # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_n_threads); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = 
__Pyx_PyInt_From_int(__pyx_v_dim_z); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_u_double) < 0) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_self->dz_dt, 0); __pyx_v_self->dz_dt = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":14 * cdef class ImplicitMidpointIntegrator: * * def __init__(self, int dim_z, double dt, double tol, int max_iters, # <<<<<<<<<<<<<< * int n_threads=4): * self.dim_z = dim_z */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.__init__", __pyx_clineno, __pyx_lineno, 
/* NOTE(review): This file is machine-generated C emitted by Cython from
 * "dapy/integrators/implicitmidpoint.pyx".  Do not hand-edit: change the
 * .pyx source and re-run Cython.  This span contains (1) the error/exit
 * path of __init__ (whose body begins earlier in the file), (2) the no-op
 * base-class hook update_dz_dt (.pyx line 29; subclasses override it via
 * the vtable), and (3) implicit_midpoint_step (.pyx lines 35-62): an
 * explicit-Euler predictor followed by a fixed-point iteration for the
 * implicit midpoint rule, returning 1 on divergence (NaN/Inf update) or
 * non-convergence after max_iters sweeps, and 0 on success. */
__pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dapy/integrators/implicitmidpoint.pyx":29 * @cython.wraparound(False) * @cython.cdivision(True) * cdef void update_dz_dt(self, double[:] z, double t, double[:] dz_dt) nogil: # <<<<<<<<<<<<<< * pass * */ static void __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_update_dz_dt(CYTHON_UNUSED struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_z, CYTHON_UNUSED double __pyx_v_t, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_dz_dt) { /* function exit code */ } /* "dapy/integrators/implicitmidpoint.pyx":35 * @cython.wraparound(False) * @cython.cdivision(True) * cdef bint implicit_midpoint_step( # <<<<<<<<<<<<<< * self, double[:] z, double time, double[:] dz_dt, * double[:] z_half, double[:] z_next) nogil: */ static int __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_implicit_midpoint_step(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, __Pyx_memviewslice __pyx_v_z, double __pyx_v_time, __Pyx_memviewslice __pyx_v_dz_dt, __Pyx_memviewslice __pyx_v_z_half, __Pyx_memviewslice __pyx_v_z_next) { double __pyx_v_max_abs_diff; double __pyx_v_abs_diff; double __pyx_v_prev_val; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; /* "dapy/integrators/implicitmidpoint.pyx":40 * cdef double max_abs_diff, abs_diff, prev_val * cdef int i, j * self.update_dz_dt(z, time + self.dt / 2., dz_dt) # <<<<<<<<<<<<<< * for j in range(self.dim_z): * z_next[j] = z[j] + self.dt * dz_dt[j] */ ((struct 
__pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self->__pyx_vtab)->update_dz_dt(__pyx_v_self, __pyx_v_z, (__pyx_v_time + (__pyx_v_self->dt / 2.)), __pyx_v_dz_dt); /* "dapy/integrators/implicitmidpoint.pyx":41 * cdef int i, j * self.update_dz_dt(z, time + self.dt / 2., dz_dt) * for j in range(self.dim_z): # <<<<<<<<<<<<<< * z_next[j] = z[j] + self.dt * dz_dt[j] * i = 0 */ __pyx_t_1 = __pyx_v_self->dim_z; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_j = __pyx_t_3; /* "dapy/integrators/implicitmidpoint.pyx":42 * self.update_dz_dt(z, time + self.dt / 2., dz_dt) * for j in range(self.dim_z): * z_next[j] = z[j] + self.dt * dz_dt[j] # <<<<<<<<<<<<<< * i = 0 * max_abs_diff = self.tol + 1. */ __pyx_t_4 = __pyx_v_j; __pyx_t_5 = __pyx_v_j; __pyx_t_6 = __pyx_v_j; *((double *) ( /* dim=0 */ (__pyx_v_z_next.data + __pyx_t_6 * __pyx_v_z_next.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_z.data + __pyx_t_4 * __pyx_v_z.strides[0]) ))) + (__pyx_v_self->dt * (*((double *) ( /* dim=0 */ (__pyx_v_dz_dt.data + __pyx_t_5 * __pyx_v_dz_dt.strides[0]) ))))); } /* "dapy/integrators/implicitmidpoint.pyx":43 * for j in range(self.dim_z): * z_next[j] = z[j] + self.dt * dz_dt[j] * i = 0 # <<<<<<<<<<<<<< * max_abs_diff = self.tol + 1. * while max_abs_diff > self.tol and i < self.max_iters: */ __pyx_v_i = 0; /* "dapy/integrators/implicitmidpoint.pyx":44 * z_next[j] = z[j] + self.dt * dz_dt[j] * i = 0 * max_abs_diff = self.tol + 1. # <<<<<<<<<<<<<< * while max_abs_diff > self.tol and i < self.max_iters: * max_abs_diff = 0. */ __pyx_v_max_abs_diff = (__pyx_v_self->tol + 1.); /* "dapy/integrators/implicitmidpoint.pyx":45 * i = 0 * max_abs_diff = self.tol + 1. * while max_abs_diff > self.tol and i < self.max_iters: # <<<<<<<<<<<<<< * max_abs_diff = 0. 
 * for j in range(self.dim_z): */ while (1) { __pyx_t_8 = ((__pyx_v_max_abs_diff > __pyx_v_self->tol) != 0); if (__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L7_bool_binop_done; } __pyx_t_8 = ((__pyx_v_i < __pyx_v_self->max_iters) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L7_bool_binop_done:; if (!__pyx_t_7) break; /* "dapy/integrators/implicitmidpoint.pyx":46 * max_abs_diff = self.tol + 1. * while max_abs_diff > self.tol and i < self.max_iters: * max_abs_diff = 0. # <<<<<<<<<<<<<< * for j in range(self.dim_z): * z_half[j] = (z_next[j] + z[j]) / 2. */ __pyx_v_max_abs_diff = 0.; /* "dapy/integrators/implicitmidpoint.pyx":47 * while max_abs_diff > self.tol and i < self.max_iters: * max_abs_diff = 0. * for j in range(self.dim_z): # <<<<<<<<<<<<<< * z_half[j] = (z_next[j] + z[j]) / 2. * self.update_dz_dt(z_half, time + self.dt / 2., dz_dt) */ __pyx_t_1 = __pyx_v_self->dim_z; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_j = __pyx_t_3; /* "dapy/integrators/implicitmidpoint.pyx":48 * max_abs_diff = 0. * for j in range(self.dim_z): * z_half[j] = (z_next[j] + z[j]) / 2. # <<<<<<<<<<<<<< * self.update_dz_dt(z_half, time + self.dt / 2., dz_dt) * for j in range(self.dim_z): */ __pyx_t_9 = __pyx_v_j; __pyx_t_10 = __pyx_v_j; __pyx_t_11 = __pyx_v_j; *((double *) ( /* dim=0 */ (__pyx_v_z_half.data + __pyx_t_11 * __pyx_v_z_half.strides[0]) )) = (((*((double *) ( /* dim=0 */ (__pyx_v_z_next.data + __pyx_t_9 * __pyx_v_z_next.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_z.data + __pyx_t_10 * __pyx_v_z.strides[0]) )))) / 2.); } /* "dapy/integrators/implicitmidpoint.pyx":49 * for j in range(self.dim_z): * z_half[j] = (z_next[j] + z[j]) / 2. 
 * self.update_dz_dt(z_half, time + self.dt / 2., dz_dt) # <<<<<<<<<<<<<< * for j in range(self.dim_z): * prev_val = z_next[j] */ ((struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self->__pyx_vtab)->update_dz_dt(__pyx_v_self, __pyx_v_z_half, (__pyx_v_time + (__pyx_v_self->dt / 2.)), __pyx_v_dz_dt); /* "dapy/integrators/implicitmidpoint.pyx":50 * z_half[j] = (z_next[j] + z[j]) / 2. * self.update_dz_dt(z_half, time + self.dt / 2., dz_dt) * for j in range(self.dim_z): # <<<<<<<<<<<<<< * prev_val = z_next[j] * z_next[j] = z[j] + self.dt * dz_dt[j] */ __pyx_t_1 = __pyx_v_self->dim_z; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_j = __pyx_t_3; /* "dapy/integrators/implicitmidpoint.pyx":51 * self.update_dz_dt(z_half, time + self.dt / 2., dz_dt) * for j in range(self.dim_z): * prev_val = z_next[j] # <<<<<<<<<<<<<< * z_next[j] = z[j] + self.dt * dz_dt[j] * abs_diff = fabs(z_next[j] - prev_val) */ __pyx_t_12 = __pyx_v_j; __pyx_v_prev_val = (*((double *) ( /* dim=0 */ (__pyx_v_z_next.data + __pyx_t_12 * __pyx_v_z_next.strides[0]) ))); /* "dapy/integrators/implicitmidpoint.pyx":52 * for j in range(self.dim_z): * prev_val = z_next[j] * z_next[j] = z[j] + self.dt * dz_dt[j] # <<<<<<<<<<<<<< * abs_diff = fabs(z_next[j] - prev_val) * if isnan(abs_diff) or isinf(abs_diff): */ __pyx_t_13 = __pyx_v_j; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_j; *((double *) ( /* dim=0 */ (__pyx_v_z_next.data + __pyx_t_15 * __pyx_v_z_next.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_z.data + __pyx_t_13 * __pyx_v_z.strides[0]) ))) + (__pyx_v_self->dt * (*((double *) ( /* dim=0 */ (__pyx_v_dz_dt.data + __pyx_t_14 * __pyx_v_dz_dt.strides[0]) ))))); /* "dapy/integrators/implicitmidpoint.pyx":53 * prev_val = z_next[j] * z_next[j] = z[j] + self.dt * dz_dt[j] * abs_diff = fabs(z_next[j] - prev_val) # <<<<<<<<<<<<<< * if isnan(abs_diff) or isinf(abs_diff): * return 1 */ __pyx_t_16 = __pyx_v_j; 
/* Convergence bookkeeping: a NaN/Inf component change aborts the whole
 * step immediately (return 1); otherwise the largest per-component change
 * this sweep is tracked in max_abs_diff for the tolerance test. */
__pyx_v_abs_diff = fabs(((*((double *) ( /* dim=0 */ (__pyx_v_z_next.data + __pyx_t_16 * __pyx_v_z_next.strides[0]) ))) - __pyx_v_prev_val)); /* "dapy/integrators/implicitmidpoint.pyx":54 * z_next[j] = z[j] + self.dt * dz_dt[j] * abs_diff = fabs(z_next[j] - prev_val) * if isnan(abs_diff) or isinf(abs_diff): # <<<<<<<<<<<<<< * return 1 * if abs_diff > max_abs_diff: */ __pyx_t_8 = (isnan(__pyx_v_abs_diff) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L14_bool_binop_done; } __pyx_t_8 = (isinf(__pyx_v_abs_diff) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L14_bool_binop_done:; if (__pyx_t_7) { /* "dapy/integrators/implicitmidpoint.pyx":55 * abs_diff = fabs(z_next[j] - prev_val) * if isnan(abs_diff) or isinf(abs_diff): * return 1 # <<<<<<<<<<<<<< * if abs_diff > max_abs_diff: * max_abs_diff = abs_diff */ __pyx_r = 1; goto __pyx_L0; /* "dapy/integrators/implicitmidpoint.pyx":54 * z_next[j] = z[j] + self.dt * dz_dt[j] * abs_diff = fabs(z_next[j] - prev_val) * if isnan(abs_diff) or isinf(abs_diff): # <<<<<<<<<<<<<< * return 1 * if abs_diff > max_abs_diff: */ } /* "dapy/integrators/implicitmidpoint.pyx":56 * if isnan(abs_diff) or isinf(abs_diff): * return 1 * if abs_diff > max_abs_diff: # <<<<<<<<<<<<<< * max_abs_diff = abs_diff * i += 1 */ __pyx_t_7 = ((__pyx_v_abs_diff > __pyx_v_max_abs_diff) != 0); if (__pyx_t_7) { /* "dapy/integrators/implicitmidpoint.pyx":57 * return 1 * if abs_diff > max_abs_diff: * max_abs_diff = abs_diff # <<<<<<<<<<<<<< * i += 1 * if i == self.max_iters: */ __pyx_v_max_abs_diff = __pyx_v_abs_diff; /* "dapy/integrators/implicitmidpoint.pyx":56 * if isnan(abs_diff) or isinf(abs_diff): * return 1 * if abs_diff > max_abs_diff: # <<<<<<<<<<<<<< * max_abs_diff = abs_diff * i += 1 */ } } /* "dapy/integrators/implicitmidpoint.pyx":58 * if abs_diff > max_abs_diff: * max_abs_diff = abs_diff * i += 1 # <<<<<<<<<<<<<< * if i == self.max_iters: * return 1 */ __pyx_v_i = (__pyx_v_i + 1); } /* "dapy/integrators/implicitmidpoint.pyx":59 * max_abs_diff 
= abs_diff * i += 1 * if i == self.max_iters: # <<<<<<<<<<<<<< * return 1 * else: */ __pyx_t_7 = ((__pyx_v_i == __pyx_v_self->max_iters) != 0); if (__pyx_t_7) { /* "dapy/integrators/implicitmidpoint.pyx":60 * i += 1 * if i == self.max_iters: * return 1 # <<<<<<<<<<<<<< * else: * return 0 */ __pyx_r = 1; goto __pyx_L0; /* "dapy/integrators/implicitmidpoint.pyx":59 * max_abs_diff = abs_diff * i += 1 * if i == self.max_iters: # <<<<<<<<<<<<<< * return 1 * else: */ } /* "dapy/integrators/implicitmidpoint.pyx":62 * return 1 * else: * return 0 # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ /*else*/ { __pyx_r = 0; goto __pyx_L0; } /* "dapy/integrators/implicitmidpoint.pyx":35 * @cython.wraparound(False) * @cython.cdivision(True) * cdef bint implicit_midpoint_step( # <<<<<<<<<<<<<< * self, double[:] z, double time, double[:] dz_dt, * double[:] z_half, double[:] z_next) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "dapy/integrators/implicitmidpoint.pyx":67 * @cython.wraparound(False) * @cython.cdivision(True) * cdef void partition_particles(self, int n_particles): # <<<<<<<<<<<<<< * """Partition particle index range in to equal sized intervals. * Used to allocate particle updates to different parallel threads. 
*/
/* partition_particles (.pyx line 67): fill self.intervals with the start
 * index of each thread's particle range, intervals[t] =
 * <int>(t * n_particles / n_threads), computed in single precision
 * (NOTE(review): the <float> cast can misplace interval boundaries for
 * very large n_particles -- confirm against the .pyx source), then set the
 * closing sentinel intervals[n_threads] = n_particles. */
static void __pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_partition_particles(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, int __pyx_v_n_particles) { int __pyx_v_t; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("partition_particles", 0); /* "dapy/integrators/implicitmidpoint.pyx":75 * """ * cdef int t * for t in range(self.n_threads): # <<<<<<<<<<<<<< * self.intervals[t] = <int>( * t * <float>(n_particles) / self.n_threads) */ __pyx_t_1 = __pyx_v_self->n_threads; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_t = __pyx_t_3; /* "dapy/integrators/implicitmidpoint.pyx":76 * cdef int t * for t in range(self.n_threads): * self.intervals[t] = <int>( # <<<<<<<<<<<<<< * t * <float>(n_particles) / self.n_threads) * self.intervals[self.n_threads] = n_particles */ __pyx_t_4 = __pyx_v_t; *((int *) ( /* dim=0 */ (__pyx_v_self->intervals.data + __pyx_t_4 * __pyx_v_self->intervals.strides[0]) )) = ((int)((__pyx_v_t * ((float)__pyx_v_n_particles)) / ((float)__pyx_v_self->n_threads))); } /* "dapy/integrators/implicitmidpoint.pyx":78 * self.intervals[t] = <int>( * t * <float>(n_particles) / self.n_threads) * self.intervals[self.n_threads] = n_particles # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_t_5 = __pyx_v_self->n_threads; *((int *) ( /* dim=0 */ (__pyx_v_self->intervals.data + __pyx_t_5 * __pyx_v_self->intervals.strides[0]) )) = __pyx_v_n_particles; /* "dapy/integrators/implicitmidpoint.pyx":67 * @cython.wraparound(False) * @cython.cdivision(True) * cdef void partition_particles(self, int n_particles): # <<<<<<<<<<<<<< * """Partition particle index range in to equal sized intervals. * Used to allocate particle updates to different parallel threads. 
*/ /* function exit code */ __Pyx_RefNannyFinishContext(); }
/* forward_integrate (.pyx line 83), Python-callable wrapper: declared
 * below with its docstring; it unpacks (z_particles, start_time_index,
 * n_steps=1) from args/kwargs, converts them to C types, and delegates to
 * the C implementation function. */
 /* "dapy/integrators/implicitmidpoint.pyx":83 * @cython.wraparound(False) * @cython.cdivision(True) * def forward_integrate( # <<<<<<<<<<<<<< * self, double[:, :] z_particles, int start_time_index, * int n_steps=1): */ /* Python wrapper */ static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_3forward_integrate(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_2forward_integrate[] = "ImplicitMidpointIntegrator.forward_integrate(self, double[:, :] z_particles, int start_time_index, int n_steps=1)\nIntegrate a set of state particles forward in time.\n\n Args:\n z_particles (array): Array of current state particle values of\n shape `(n_particles, dim_z)`.\n start_time_index (int): Integer indicating current time index\n associated with the `z_curr` states (i.e. number of previous\n `forward_integrate` calls) to allow for calculate of time for\n non-homogeneous systems.\n n_step (int): Number of integrator time steps to perform.\n\n Returns:\n Array of forward propagated state particle values of shape\n `(n_particles, dim_z)`.\n "; static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_3forward_integrate(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_z_particles = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_start_time_index; int __pyx_v_n_steps; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("forward_integrate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z_particles,&__pyx_n_s_start_time_index,&__pyx_n_s_n_steps,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = 
/* Argument unpacking for forward_integrate, continued: keyword lookup and
 * positional fast path, then conversion of z_particles to a writable
 * double[:, :] memoryview and of the int arguments via
 * __Pyx_PyInt_As_int; n_steps defaults to 1 when omitted. */
PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_z_particles)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_start_time_index)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("forward_integrate", 0, 2, 3, 1); __PYX_ERR(0, 83, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n_steps); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "forward_integrate") < 0)) __PYX_ERR(0, 83, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_z_particles = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_z_particles.memview)) __PYX_ERR(0, 84, __pyx_L3_error) __pyx_v_start_time_index = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_start_time_index == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error) if (values[2]) { __pyx_v_n_steps = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_n_steps == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L3_error) } else { __pyx_v_n_steps = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("forward_integrate", 0, 2, 3, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 83, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.forward_integrate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_2forward_integrate(((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self), __pyx_v_z_particles, __pyx_v_start_time_index, __pyx_v_n_steps); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_2forward_integrate(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, __Pyx_memviewslice __pyx_v_z_particles, int __pyx_v_start_time_index, int __pyx_v_n_steps) { int __pyx_v_n_particles; __Pyx_memviewslice __pyx_v_z_particles_next = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_t; int __pyx_v_p; int __pyx_v_s; int __pyx_v_error; double __pyx_v_time; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; __Pyx_memviewslice __pyx_t_19 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_20 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_21 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_22 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_23 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_24 = { 0, 0, { 0 }, { 0 }, { 0 } }; 
/* NOTE(review): the body of this C-level forward_integrate implementation
 * continues past this chunk; only its local declarations are visible here. */
__Pyx_RefNannySetupContext("forward_integrate", 0); /* "dapy/integrators/implicitmidpoint.pyx":101 * `(n_particles, dim_z)`. * """ * cdef int n_particles = z_particles.shape[0] # <<<<<<<<<<<<<< * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( */ __pyx_v_n_particles = (__pyx_v_z_particles.shape[0]); /* "dapy/integrators/implicitmidpoint.pyx":102 * """ * cdef int n_particles = z_particles.shape[0] * self.partition_particles(n_particles) # <<<<<<<<<<<<<< * cdef double[:, :] z_particles_next = np.empty( * (n_particles, self.dim_z), dtype='double') */ ((struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self->__pyx_vtab)->partition_particles(__pyx_v_self, __pyx_v_n_particles); /* "dapy/integrators/implicitmidpoint.pyx":103 * cdef int n_particles = z_particles.shape[0] * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( # <<<<<<<<<<<<<< * (n_particles, self.dim_z), dtype='double') * cdef int t, p, s, */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "dapy/integrators/implicitmidpoint.pyx":104 * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( * (n_particles, self.dim_z), dtype='double') # <<<<<<<<<<<<<< * cdef int t, p, s, * cdef bint error */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_n_particles); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->dim_z); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; /* "dapy/integrators/implicitmidpoint.pyx":103 * cdef int n_particles = z_particles.shape[0] * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( # <<<<<<<<<<<<<< * (n_particles, self.dim_z), dtype='double') * cdef int t, p, s, */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; /* "dapy/integrators/implicitmidpoint.pyx":104 * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( * (n_particles, self.dim_z), dtype='double') # <<<<<<<<<<<<<< * cdef int t, p, s, * cdef bint error */ __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_u_double) < 0) __PYX_ERR(0, 104, __pyx_L1_error) /* "dapy/integrators/implicitmidpoint.pyx":103 * cdef int n_particles = z_particles.shape[0] * self.partition_particles(n_particles) * cdef double[:, :] z_particles_next = np.empty( # <<<<<<<<<<<<<< * (n_particles, self.dim_z), dtype='double') * cdef int t, p, s, */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_z_particles_next = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":107 * cdef int t, p, s, * cdef bint 
error * cdef double time = start_time_index * n_steps * self.dt # <<<<<<<<<<<<<< * for t in prange(self.n_threads, nogil=True, schedule='static', * chunksize=1, num_threads=self.n_threads): */ __pyx_v_time = ((__pyx_v_start_time_index * __pyx_v_n_steps) * __pyx_v_self->dt); /* "dapy/integrators/implicitmidpoint.pyx":108 * cdef bint error * cdef double time = start_time_index * n_steps * self.dt * for t in prange(self.n_threads, nogil=True, schedule='static', # <<<<<<<<<<<<<< * chunksize=1, num_threads=self.n_threads): * for p in range(self.intervals[t], self.intervals[t+1]): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_6 = __pyx_v_self->n_threads; if (1 == 0) abort(); { int __pyx_parallel_temp0 = ((int)0xbad0bad0); int __pyx_parallel_temp1 = ((int)0xbad0bad0); int __pyx_parallel_temp2 = ((int)0xbad0bad0); int __pyx_parallel_temp3 = ((int)0xbad0bad0); double __pyx_parallel_temp4 = ((double)__PYX_NAN()); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; /* "dapy/integrators/implicitmidpoint.pyx":109 * cdef double time = start_time_index * n_steps * self.dt * for t in prange(self.n_threads, nogil=True, schedule='static', * chunksize=1, num_threads=self.n_threads): # <<<<<<<<<<<<<< * for p in range(self.intervals[t], self.intervals[t+1]): * for s in range(n_steps): */ __pyx_t_9 = 1; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_self->n_threads) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, 
__pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18) firstprivate(__pyx_t_1, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_3, __pyx_t_4) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_error) lastprivate(__pyx_v_p) lastprivate(__pyx_v_s) firstprivate(__pyx_v_t) lastprivate(__pyx_v_t) lastprivate(__pyx_v_time) schedule(static, __pyx_t_9) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ if (__pyx_parallel_why < 2) { __pyx_v_t = (int)(0 + 1 * __pyx_t_7); /* Initialize private variables to invalid values */ __pyx_v_error = ((int)0xbad0bad0); __pyx_v_p = ((int)0xbad0bad0); __pyx_v_s = ((int)0xbad0bad0); __pyx_v_time = ((double)__PYX_NAN()); /* "dapy/integrators/implicitmidpoint.pyx":110 * for t in prange(self.n_threads, nogil=True, schedule='static', * chunksize=1, num_threads=self.n_threads): * for p in range(self.intervals[t], self.intervals[t+1]): # <<<<<<<<<<<<<< * for s in range(n_steps): * if s == 0: */ __pyx_t_10 = (__pyx_v_t + 1); __pyx_t_11 = (*((int *) ( /* dim=0 */ (__pyx_v_self->intervals.data + __pyx_t_10 * __pyx_v_self->intervals.strides[0]) ))); __pyx_t_12 = __pyx_v_t; __pyx_t_13 = __pyx_t_11; for (__pyx_t_14 = (*((int *) ( /* dim=0 */ (__pyx_v_self->intervals.data + __pyx_t_12 * __pyx_v_self->intervals.strides[0]) ))); __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_p = __pyx_t_14; /* "dapy/integrators/implicitmidpoint.pyx":111 * chunksize=1, num_threads=self.n_threads): * for p in range(self.intervals[t], self.intervals[t+1]): * for s in range(n_steps): # <<<<<<<<<<<<<< * if s == 0: * self.z_temp[t, :] = z_particles[p] */ __pyx_t_15 = __pyx_v_n_steps; 
__pyx_t_16 = __pyx_t_15; for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) { __pyx_v_s = __pyx_t_17; /* "dapy/integrators/implicitmidpoint.pyx":112 * for p in range(self.intervals[t], self.intervals[t+1]): * for s in range(n_steps): * if s == 0: # <<<<<<<<<<<<<< * self.z_temp[t, :] = z_particles[p] * else: */ __pyx_t_18 = ((__pyx_v_s == 0) != 0); if (__pyx_t_18) { /* "dapy/integrators/implicitmidpoint.pyx":113 * for s in range(n_steps): * if s == 0: * self.z_temp[t, :] = z_particles[p] # <<<<<<<<<<<<<< * else: * self.z_temp[t, :] = z_particles_next[p] */ __pyx_t_19.data = __pyx_v_z_particles.data; __pyx_t_19.memview = __pyx_v_z_particles.memview; __PYX_INC_MEMVIEW(&__pyx_t_19, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_p; Py_ssize_t __pyx_tmp_stride = __pyx_v_z_particles.strides[0]; if ((0)) __PYX_ERR(0, 113, __pyx_L8_error) __pyx_t_19.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_19.shape[0] = __pyx_v_z_particles.shape[1]; __pyx_t_19.strides[0] = __pyx_v_z_particles.strides[1]; __pyx_t_19.suboffsets[0] = -1; __pyx_t_20.data = __pyx_v_self->z_temp.data; __pyx_t_20.memview = __pyx_v_self->z_temp.memview; __PYX_INC_MEMVIEW(&__pyx_t_20, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_stride = __pyx_v_self->z_temp.strides[0]; if ((0)) __PYX_ERR(0, 113, __pyx_L8_error) __pyx_t_20.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_20.shape[0] = __pyx_v_self->z_temp.shape[1]; __pyx_t_20.strides[0] = __pyx_v_self->z_temp.strides[1]; __pyx_t_20.suboffsets[0] = -1; if (unlikely(__pyx_memoryview_copy_contents(__pyx_t_19, __pyx_t_20, 1, 1, 0) < 0)) __PYX_ERR(0, 113, __pyx_L8_error) __PYX_XDEC_MEMVIEW(&__pyx_t_20, 0); __pyx_t_20.memview = NULL; __pyx_t_20.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_19, 0); __pyx_t_19.memview = NULL; __pyx_t_19.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":112 * for p in range(self.intervals[t], self.intervals[t+1]): * for s in range(n_steps): * if s == 0: # <<<<<<<<<<<<<< * self.z_temp[t, :] = 
z_particles[p] * else: */ goto __pyx_L14; } /* "dapy/integrators/implicitmidpoint.pyx":115 * self.z_temp[t, :] = z_particles[p] * else: * self.z_temp[t, :] = z_particles_next[p] # <<<<<<<<<<<<<< * error = self.implicit_midpoint_step( * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], */ /*else*/ { __pyx_t_19.data = __pyx_v_z_particles_next.data; __pyx_t_19.memview = __pyx_v_z_particles_next.memview; __PYX_INC_MEMVIEW(&__pyx_t_19, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_p; Py_ssize_t __pyx_tmp_stride = __pyx_v_z_particles_next.strides[0]; if ((0)) __PYX_ERR(0, 115, __pyx_L8_error) __pyx_t_19.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_19.shape[0] = __pyx_v_z_particles_next.shape[1]; __pyx_t_19.strides[0] = __pyx_v_z_particles_next.strides[1]; __pyx_t_19.suboffsets[0] = -1; __pyx_t_21.data = __pyx_v_self->z_temp.data; __pyx_t_21.memview = __pyx_v_self->z_temp.memview; __PYX_INC_MEMVIEW(&__pyx_t_21, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_stride = __pyx_v_self->z_temp.strides[0]; if ((0)) __PYX_ERR(0, 115, __pyx_L8_error) __pyx_t_21.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_21.shape[0] = __pyx_v_self->z_temp.shape[1]; __pyx_t_21.strides[0] = __pyx_v_self->z_temp.strides[1]; __pyx_t_21.suboffsets[0] = -1; if (unlikely(__pyx_memoryview_copy_contents(__pyx_t_19, __pyx_t_21, 1, 1, 0) < 0)) __PYX_ERR(0, 115, __pyx_L8_error) __PYX_XDEC_MEMVIEW(&__pyx_t_21, 0); __pyx_t_21.memview = NULL; __pyx_t_21.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_19, 0); __pyx_t_19.memview = NULL; __pyx_t_19.data = NULL; } __pyx_L14:; /* "dapy/integrators/implicitmidpoint.pyx":117 * self.z_temp[t, :] = z_particles_next[p] * error = self.implicit_midpoint_step( * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], # <<<<<<<<<<<<<< * z_particles_next[p]) * if error == 1: */ __pyx_t_19.data = __pyx_v_self->z_temp.data; __pyx_t_19.memview = __pyx_v_self->z_temp.memview; __PYX_INC_MEMVIEW(&__pyx_t_19, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t 
__pyx_tmp_stride = __pyx_v_self->z_temp.strides[0]; if ((0)) __PYX_ERR(0, 117, __pyx_L8_error) __pyx_t_19.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_19.shape[0] = __pyx_v_self->z_temp.shape[1]; __pyx_t_19.strides[0] = __pyx_v_self->z_temp.strides[1]; __pyx_t_19.suboffsets[0] = -1; __pyx_t_22.data = __pyx_v_self->dz_dt.data; __pyx_t_22.memview = __pyx_v_self->dz_dt.memview; __PYX_INC_MEMVIEW(&__pyx_t_22, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_stride = __pyx_v_self->dz_dt.strides[0]; if ((0)) __PYX_ERR(0, 117, __pyx_L8_error) __pyx_t_22.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_22.shape[0] = __pyx_v_self->dz_dt.shape[1]; __pyx_t_22.strides[0] = __pyx_v_self->dz_dt.strides[1]; __pyx_t_22.suboffsets[0] = -1; __pyx_t_23.data = __pyx_v_self->z_half.data; __pyx_t_23.memview = __pyx_v_self->z_half.memview; __PYX_INC_MEMVIEW(&__pyx_t_23, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_t; Py_ssize_t __pyx_tmp_stride = __pyx_v_self->z_half.strides[0]; if ((0)) __PYX_ERR(0, 117, __pyx_L8_error) __pyx_t_23.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_23.shape[0] = __pyx_v_self->z_half.shape[1]; __pyx_t_23.strides[0] = __pyx_v_self->z_half.strides[1]; __pyx_t_23.suboffsets[0] = -1; __pyx_t_24.data = __pyx_v_z_particles_next.data; /* "dapy/integrators/implicitmidpoint.pyx":118 * error = self.implicit_midpoint_step( * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], * z_particles_next[p]) # <<<<<<<<<<<<<< * if error == 1: * with gil: */ __pyx_t_24.memview = __pyx_v_z_particles_next.memview; __PYX_INC_MEMVIEW(&__pyx_t_24, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_p; Py_ssize_t __pyx_tmp_stride = __pyx_v_z_particles_next.strides[0]; if ((0)) __PYX_ERR(0, 118, __pyx_L8_error) __pyx_t_24.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_24.shape[0] = __pyx_v_z_particles_next.shape[1]; __pyx_t_24.strides[0] = __pyx_v_z_particles_next.strides[1]; __pyx_t_24.suboffsets[0] = -1; __pyx_v_error = ((struct 
__pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self->__pyx_vtab)->implicit_midpoint_step(__pyx_v_self, __pyx_t_19, __pyx_v_time, __pyx_t_22, __pyx_t_23, __pyx_t_24); /* "dapy/integrators/implicitmidpoint.pyx":116 * else: * self.z_temp[t, :] = z_particles_next[p] * error = self.implicit_midpoint_step( # <<<<<<<<<<<<<< * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], * z_particles_next[p]) */ __PYX_XDEC_MEMVIEW(&__pyx_t_19, 0); __pyx_t_19.memview = NULL; __pyx_t_19.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_22, 0); __pyx_t_22.memview = NULL; __pyx_t_22.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_23, 0); __pyx_t_23.memview = NULL; __pyx_t_23.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_24, 0); __pyx_t_24.memview = NULL; __pyx_t_24.data = NULL; /* "dapy/integrators/implicitmidpoint.pyx":119 * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], * z_particles_next[p]) * if error == 1: # <<<<<<<<<<<<<< * with gil: * raise ConvergenceError( */ __pyx_t_18 = ((__pyx_v_error == 1) != 0); if (__pyx_t_18) { /* "dapy/integrators/implicitmidpoint.pyx":120 * z_particles_next[p]) * if error == 1: * with gil: # <<<<<<<<<<<<<< * raise ConvergenceError( * 'Convergence error in implicit midpoint step.') */ { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { /* "dapy/integrators/implicitmidpoint.pyx":121 * if error == 1: * with gil: * raise ConvergenceError( # <<<<<<<<<<<<<< * 'Convergence error in implicit midpoint step.') * time = time + self.dt */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_ConvergenceError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } 
} __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_3, __pyx_kp_u_Convergence_error_in_implicit_mi) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_kp_u_Convergence_error_in_implicit_mi); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 121, __pyx_L19_error) } /* "dapy/integrators/implicitmidpoint.pyx":120 * z_particles_next[p]) * if error == 1: * with gil: # <<<<<<<<<<<<<< * raise ConvergenceError( * 'Convergence error in implicit midpoint step.') */ /*finally:*/ { __pyx_L19_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L8_error; } } } /* "dapy/integrators/implicitmidpoint.pyx":119 * self.z_temp[t], time, self.dz_dt[t], self.z_half[t], * z_particles_next[p]) * if error == 1: # <<<<<<<<<<<<<< * with gil: * raise ConvergenceError( */ } /* "dapy/integrators/implicitmidpoint.pyx":123 * raise ConvergenceError( * 'Convergence error in implicit midpoint step.') * time = time + self.dt # <<<<<<<<<<<<<< * return z_particles_next */ __pyx_v_time = (__pyx_v_time + __pyx_v_self->dt); } } goto __pyx_L22; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L21; __pyx_L21:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = 
__pyx_v_error; __pyx_parallel_temp1 = __pyx_v_p; __pyx_parallel_temp2 = __pyx_v_s; __pyx_parallel_temp3 = __pyx_v_t; __pyx_parallel_temp4 = __pyx_v_time; } __pyx_L22:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_19, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_20, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_21, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_22, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_23, 0); __PYX_XDEC_MEMVIEW(&__pyx_t_24, 0); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = NULL; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = NULL; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_error = __pyx_parallel_temp0; __pyx_v_p = __pyx_parallel_temp1; __pyx_v_s = __pyx_parallel_temp2; __pyx_v_t = __pyx_parallel_temp3; __pyx_v_time = __pyx_parallel_temp4; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "dapy/integrators/implicitmidpoint.pyx":108 * cdef bint error * cdef double time = start_time_index * n_steps * self.dt * for t in prange(self.n_threads, nogil=True, schedule='static', # <<<<<<<<<<<<<< * chunksize=1, num_threads=self.n_threads): * for p in range(self.intervals[t], self.intervals[t+1]): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "dapy/integrators/implicitmidpoint.pyx":124 * 'Convergence error in implicit midpoint step.') * time = time + self.dt * return z_particles_next # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_z_particles_next, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "dapy/integrators/implicitmidpoint.pyx":83 * @cython.wraparound(False) * @cython.cdivision(True) * def forward_integrate( # <<<<<<<<<<<<<< * self, double[:, :] z_particles, int start_time_index, * int n_steps=1): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_19, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_20, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_21, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_22, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_23, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_24, 1); __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.forward_integrate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_z_particles_next, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_z_particles, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static char __pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_4__reduce_cython__[] = "ImplicitMidpointIntegrator.__reduce_cython__(self)"; static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_4__reduce_cython__(((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self)); /* function exit 
code */
/* NOTE(review): everything in this region is Cython-generated C (emitted
   from dapy/integrators/implicitmidpoint.pyx).  Do not hand-edit; change
   the .pyx source and re-run Cython instead.  Comments added here are for
   review orientation only — every code token below is unchanged. */
__Pyx_RefNannyFinishContext(); return __pyx_r; }

/* Implementation of ImplicitMidpointIntegrator.__reduce_cython__ (pickle
   support).  Boxes the integrator's C-level fields into a 10-tuple `state`,
   appends the instance __dict__ when one exists, and returns the
   (unpickle-callable, args[, state]) tuple that pickle expects. */
static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_4__reduce_cython__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  int __pyx_t_12;
  int __pyx_t_13;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":5
 * cdef object _dict
 * cdef bint use_setstate
 * state = (self.dim_z, self.dt, self.dz_dt, self.intervals, self.max_iters, self.n_steps_per_update, self.n_threads, self.tol, self.z_half, self.z_temp)             # <<<<<<<<<<<<<<
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:
 */
  /* Box each C attribute as a Python object; memoryview slices are wrapped
     via __pyx_memoryview_fromslice so they pickle as memoryview objects.
     __PYX_ERR jumps to __pyx_L1_error (the macro supplies the control flow,
     hence no semicolon after it). */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->dim_z); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyFloat_FromDouble(__pyx_v_self->dt); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_self->dz_dt, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_self->intervals, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->max_iters); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_self->n_steps_per_update); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_self->n_threads); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __pyx_t_8 = PyFloat_FromDouble(__pyx_v_self->tol); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = __pyx_memoryview_fromslice(__pyx_v_self->z_half, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_10 = __pyx_memoryview_fromslice(__pyx_v_self->z_temp, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_10);
  /* Assemble the 10-element state tuple; PyTuple_SET_ITEM steals each
     reference, so the temporaries are zeroed (not DECREF'd) afterwards. */
  __pyx_t_11 = PyTuple_New(10); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_11, 3, __pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_11, 4, __pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_6);
  PyTuple_SET_ITEM(__pyx_t_11, 5, __pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_7);
  PyTuple_SET_ITEM(__pyx_t_11, 6, __pyx_t_7);
  __Pyx_GIVEREF(__pyx_t_8);
  PyTuple_SET_ITEM(__pyx_t_11, 7, __pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_9);
  PyTuple_SET_ITEM(__pyx_t_11, 8, __pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_10);
  PyTuple_SET_ITEM(__pyx_t_11, 9, __pyx_t_10);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_6 = 0;
  __pyx_t_7 = 0;
  __pyx_t_8 = 0;
  __pyx_t_9 = 0;
  __pyx_t_10 = 0;
  __pyx_v_state = ((PyObject*)__pyx_t_11);
  __pyx_t_11 = 0;

  /* "(tree fragment)":6
 * cdef bint use_setstate
 * state = (self.dim_z, self.dt, self.dz_dt, self.intervals, self.max_iters, self.n_steps_per_update, self.n_threads, self.tol, self.z_half, self.z_temp)
 * _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 * if _dict is not None:
 * state += (_dict,)
 */
  __pyx_t_11 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_11);
  __pyx_v__dict = __pyx_t_11;
  __pyx_t_11 = 0;

  /* "(tree fragment)":7
 * state = (self.dim_z, self.dt, self.dz_dt, self.intervals, self.max_iters, self.n_steps_per_update, self.n_threads, self.tol, self.z_half, self.z_temp)
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:             # <<<<<<<<<<<<<<
 * state += (_dict,)
 * use_setstate = True
 */
  __pyx_t_12 = (__pyx_v__dict != Py_None);
  __pyx_t_13 = (__pyx_t_12 != 0);
  if (__pyx_t_13) {

  /* "(tree fragment)":8
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:
 * state += (_dict,)             # <<<<<<<<<<<<<<
 * use_setstate = True
 * else:
 */
    /* state += (_dict,) — instance has a __dict__, so __setstate__ is
       needed to restore it after unpickling. */
    __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_11);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_v__dict);
    __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_10));
    __pyx_t_10 = 0;

  /* "(tree fragment)":9
 * if _dict is not None:
 * state += (_dict,)
 * use_setstate = True             # <<<<<<<<<<<<<<
 * else:
 * use_setstate = False
 */
    __pyx_v_use_setstate = 1;

  /* "(tree fragment)":7
 * state = (self.dim_z, self.dt, self.dz_dt, self.intervals, self.max_iters, self.n_steps_per_update, self.n_threads, self.tol, self.z_half, self.z_temp)
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:             # <<<<<<<<<<<<<<
 * state += (_dict,)
 * use_setstate = True
 */
    goto __pyx_L3;
  }

  /* "(tree fragment)":11
 * use_setstate = True
 * else:
 * use_setstate = False             # <<<<<<<<<<<<<<
 * if use_setstate:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, None), state
 */
  /*else*/ {
    __pyx_v_use_setstate = 0;
  }
  __pyx_L3:;

  /* "(tree fragment)":12
 * else:
 * use_setstate = False
 * if use_setstate:             # <<<<<<<<<<<<<<
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, None), state
 * else:
 */
  __pyx_t_13 = (__pyx_v_use_setstate != 0);
  if (__pyx_t_13) {

  /* "(tree fragment)":13
 * use_setstate = False
 * if use_setstate:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, None), state             # <<<<<<<<<<<<<<
 * else:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, state)
 */
    /* 3-tuple form: state is passed separately so __setstate__ runs.
       0x8560452 (== __pyx_int_139854930) is the field-layout checksum. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_pyx_unpickle_ImplicitMidpointI); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_11);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    PyTuple_SET_ITEM(__pyx_t_11, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_INCREF(__pyx_int_139854930);
    __Pyx_GIVEREF(__pyx_int_139854930);
    PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_int_139854930);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    PyTuple_SET_ITEM(__pyx_t_11, 2, Py_None);
    __pyx_t_9 = PyTuple_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __Pyx_GIVEREF(__pyx_t_10);
    PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_10);
    __Pyx_GIVEREF(__pyx_t_11);
    PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_11);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_v_state);
    __pyx_t_10 = 0;
    __pyx_t_11 = 0;
    __pyx_r = __pyx_t_9;
    __pyx_t_9 = 0;
    goto __pyx_L0;

  /* "(tree fragment)":12
 * else:
 * use_setstate = False
 * if use_setstate:             # <<<<<<<<<<<<<<
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, None), state
 * else:
 */
  }

  /* "(tree fragment)":15
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, None), state
 * else:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(self, __pyx_state)
 */
  /* 2-tuple form: no __dict__, state goes straight into the unpickle args. */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_pyx_unpickle_ImplicitMidpointI); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_9);
    __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_11);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    PyTuple_SET_ITEM(__pyx_t_11, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_INCREF(__pyx_int_139854930);
    __Pyx_GIVEREF(__pyx_int_139854930);
    PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_int_139854930);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_v_state);
    __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    __Pyx_GIVEREF(__pyx_t_9);
    PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
    __Pyx_GIVEREF(__pyx_t_11);
    PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_11);
    __pyx_t_9 = 0;
    __pyx_t_11 = 0;
    __pyx_r = __pyx_t_10;
    __pyx_t_10 = 0;
    goto __pyx_L0;
  }

  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 * cdef tuple state
 * cdef object _dict
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* NOTE(review): Cython-generated C (from dapy/integrators/implicitmidpoint.pyx).
   Do not hand-edit; regenerate from the .pyx.  Comments added for review
   orientation only; all code tokens are unchanged. */

/* "(tree fragment)":16
 * else:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(self, __pyx_state)
 */

/* Python wrapper for ImplicitMidpointIntegrator.__setstate_cython__:
   unwraps self/state and delegates to the pf implementation below. */
static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static char __pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_6__setstate_cython__[] = "ImplicitMidpointIntegrator.__setstate_cython__(self, __pyx_state)";
static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_6__setstate_cython__(((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: validates __pyx_state is a tuple (or None) and forwards it
   to __pyx_unpickle_ImplicitMidpointIntegrator__set_state, which writes the
   pickled fields back onto the instance. */
static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_6__setstate_cython__(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, state)
 * def __setstate_cython__(self, __pyx_state):
 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
  __pyx_t_1 = __pyx_f_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "(tree fragment)":16
 * else:
 * return __pyx_unpickle_ImplicitMidpointIntegrator, (type(self), 0x8560452, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __pyx_unpickle_ImplicitMidpointIntegrator(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 */

/* Python wrapper */
static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_1__pyx_unpickle_ImplicitMidpointIntegrator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator[] =
"__pyx_unpickle_ImplicitMidpointIntegrator(__pyx_type, long __pyx_checksum, __pyx_state)";
/* NOTE(review): Cython-generated argument-parsing wrapper for the
   module-level __pyx_unpickle_ImplicitMidpointIntegrator helper.  Do not
   hand-edit; regenerate from the .pyx.  Comments added for orientation only;
   all code tokens are unchanged. */
static PyMethodDef __pyx_mdef_4dapy_11integrators_16implicitmidpoint_1__pyx_unpickle_ImplicitMidpointIntegrator = {"__pyx_unpickle_ImplicitMidpointIntegrator", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4dapy_11integrators_16implicitmidpoint_1__pyx_unpickle_ImplicitMidpointIntegrator, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator};
static PyObject *__pyx_pw_4dapy_11integrators_16implicitmidpoint_1__pyx_unpickle_ImplicitMidpointIntegrator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_ImplicitMidpointIntegrator (wrapper)", 0);
  {
    /* Unpack exactly three arguments (__pyx_type, __pyx_checksum,
       __pyx_state), accepting them positionally or by keyword; the switch
       fall-through pattern fills `values` from the positional tuple first. */
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ImplicitMidpointIntegrator", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ImplicitMidpointIntegrator", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_ImplicitMidpointIntegrator") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
    }
    __pyx_v___pyx_type = values[0];
    /* Coerce the checksum to a C long; -1 with a live exception signals a
       conversion failure. */
    __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
    __pyx_v___pyx_state = values[2];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_ImplicitMidpointIntegrator", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.__pyx_unpickle_ImplicitMidpointIntegrator", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: verifies the 0x8560452 layout checksum, constructs a new
   instance via __new__, and applies the pickled state (continues on the
   following lines). */
static PyObject *__pyx_pf_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_PickleError = 0;
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_ImplicitMidpointIntegrator", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0x8560452: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x8560452) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0x8560452: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) * __pyx_result = ImplicitMidpointIntegrator.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0x8560452: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % 
__pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = ImplicitMidpointIntegrator.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x85, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0x8560452: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) * __pyx_result = 
ImplicitMidpointIntegrator.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) * __pyx_result = ImplicitMidpointIntegrator.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = ImplicitMidpointIntegrator.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef 
__pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_f_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator__set_state(((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0x8560452 = (dim_z, dt, dz_dt, intervals, max_iters, n_steps_per_update, n_threads, tol, z_half, z_temp))" % __pyx_checksum) * __pyx_result = ImplicitMidpointIntegrator.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = 
__pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_ImplicitMidpointIntegrator(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.__pyx_unpickle_ImplicitMidpointIntegrator", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_f_4dapy_11integrators_16implicitmidpoint___pyx_unpickle_ImplicitMidpointIntegrator__set_state(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; double __pyx_t_2; __Pyx_memviewslice __pyx_t_3 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; 
PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_ImplicitMidpointIntegrator__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] # <<<<<<<<<<<<<< * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[10]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyInt_As_int(PyTuple_GET_ITEM(__pyx_v___pyx_state, 0)); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->dim_z = __pyx_t_1; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_2 = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_v___pyx_state, 1)); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->dt = __pyx_t_2; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyTuple_GET_ITEM(__pyx_v___pyx_state, 2), PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(1, 12, __pyx_L1_error) __PYX_XDEC_MEMVIEW(&__pyx_v___pyx_result->dz_dt, 0); 
__pyx_v___pyx_result->dz_dt = __pyx_t_3; __pyx_t_3.memview = NULL; __pyx_t_3.data = NULL; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyTuple_GET_ITEM(__pyx_v___pyx_state, 3), PyBUF_WRITABLE); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(1, 12, __pyx_L1_error) __PYX_XDEC_MEMVIEW(&__pyx_v___pyx_result->intervals, 0); __pyx_v___pyx_result->intervals = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyInt_As_int(PyTuple_GET_ITEM(__pyx_v___pyx_state, 4)); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->max_iters = __pyx_t_1; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyInt_As_int(PyTuple_GET_ITEM(__pyx_v___pyx_state, 5)); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->n_steps_per_update = __pyx_t_1; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyInt_As_int(PyTuple_GET_ITEM(__pyx_v___pyx_state, 6)); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->n_threads = __pyx_t_1; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_2 = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_v___pyx_state, 7)); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) 
__PYX_ERR(1, 12, __pyx_L1_error) __pyx_v___pyx_result->tol = __pyx_t_2; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyTuple_GET_ITEM(__pyx_v___pyx_state, 8), PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(1, 12, __pyx_L1_error) __PYX_XDEC_MEMVIEW(&__pyx_v___pyx_result->z_half, 0); __pyx_v___pyx_result->z_half = __pyx_t_3; __pyx_t_3.memview = NULL; __pyx_t_3.data = NULL; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyTuple_GET_ITEM(__pyx_v___pyx_state, 9), PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(1, 12, __pyx_L1_error) __PYX_XDEC_MEMVIEW(&__pyx_v___pyx_result->z_temp, 0); __pyx_v___pyx_result->z_temp = __pyx_t_3; __pyx_t_3.memview = NULL; __pyx_t_3.data = NULL; /* "(tree fragment)":13 * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[10]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_6 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, 
__pyx_L1_error) __pyx_t_7 = ((__pyx_t_6 > 10) != 0); if (__pyx_t_7) { } else { __pyx_t_5 = __pyx_t_7; goto __pyx_L4_bool_binop_done; } __pyx_t_7 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_8 = (__pyx_t_7 != 0); __pyx_t_5 = __pyx_t_8; __pyx_L4_bool_binop_done:; if (__pyx_t_5) { /* "(tree fragment)":14 * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[10]) # <<<<<<<<<<<<<< */ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_update); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } __pyx_t_9 = (__pyx_t_10) ? 
__Pyx_PyObject_Call2Args(__pyx_t_11, __pyx_t_10, PyTuple_GET_ITEM(__pyx_v___pyx_state, 10)) : __Pyx_PyObject_CallOneArg(__pyx_t_11, PyTuple_GET_ITEM(__pyx_v___pyx_state, 10)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[10]) */ } /* "(tree fragment)":11 * __pyx_unpickle_ImplicitMidpointIntegrator__set_state(<ImplicitMidpointIntegrator> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_ImplicitMidpointIntegrator__set_state(ImplicitMidpointIntegrator __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.dim_z = __pyx_state[0]; __pyx_result.dt = __pyx_state[1]; __pyx_result.dz_dt = __pyx_state[2]; __pyx_result.intervals = __pyx_state[3]; __pyx_result.max_iters = __pyx_state[4]; __pyx_result.n_steps_per_update = __pyx_state[5]; __pyx_result.n_threads = __pyx_state[6]; __pyx_result.tol = __pyx_state[7]; __pyx_result.z_half = __pyx_state[8]; __pyx_result.z_temp = __pyx_state[9] * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_3, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); 
__Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("dapy.integrators.implicitmidpoint.__pyx_unpickle_ImplicitMidpointIntegrator__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; PyArray_Descr *__pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } 
__Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * * cdef int i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * cdef int i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 272, __pyx_L1_error) /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":275 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == 
pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 276, __pyx_L1_error) /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":279 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":284 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":287 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L9; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L9:; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":293 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":296 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = <dtype>PyArray_DESCR(self) * cdef 
int offset */ __pyx_v_f = NULL; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":297 * cdef int t * cdef char* f = NULL * cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_7 = PyArray_DESCR(__pyx_v_self); __pyx_t_3 = ((PyObject *)__pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":300 * cdef int offset * * info.obj = self # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(descr): */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0); if (__pyx_t_1) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":303 * * if not PyDataType_HASFIELDS(descr): * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L15_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if 
(!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_L15_next_or:; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":305 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L14_bool_binop_done:; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_1)) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 306, __pyx_L1_error) /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") 
*/ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":307 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; case NPY_UBYTE: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":308 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_v_f = ((char *)"B"); break; case NPY_SHORT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":309 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_v_f = ((char *)"h"); break; case NPY_USHORT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":310 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_v_f = ((char *)"H"); break; case NPY_INT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":311 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_v_f = ((char *)"i"); break; case NPY_UINT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":312 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ 
__pyx_v_f = ((char *)"I"); break; case NPY_LONG: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":313 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_v_f = ((char *)"l"); break; case NPY_ULONG: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":314 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_v_f = ((char *)"L"); break; case NPY_LONGLONG: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":315 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_v_f = ((char *)"q"); break; case NPY_ULONGLONG: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":316 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_v_f = ((char *)"Q"); break; case NPY_FLOAT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":317 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_v_f = ((char *)"f"); break; case NPY_DOUBLE: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":318 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ 
__pyx_v_f = ((char *)"d"); break; case NPY_LONGDOUBLE: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":319 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_v_f = ((char *)"g"); break; case NPY_CFLOAT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":320 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_v_f = ((char *)"Zf"); break; case NPY_CDOUBLE: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":321 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_v_f = ((char *)"Zd"); break; case NPY_CLONGDOUBLE: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":322 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_v_f = ((char *)"Zg"); break; case NPY_OBJECT: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":323 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_v_f = ((char *)"O"); break; default: /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":325 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ 
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 325, __pyx_L1_error) break; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":326 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":327 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":329 * return * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":330 * else: * info.format = 
<char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":331 * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":332 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(2, 332, __pyx_L1_error) __pyx_v_f = __pyx_t_9; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":335 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. 
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":339 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":341 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ 
__Pyx_RefNannyFinishContext(); } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":825 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); 
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, 
<void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ } /* 
"../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * return <tuple>d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/
/*
 * Cython-generated from numpy/__init__.pxd:842, `_util_dtypestring`.
 *
 * Recursive utility used by ndarray.__getbuffer__ to build a buffer-protocol
 * (struct-module style) format string for a structured dtype (per the
 * generator's own note: "Recursive utility function used in __getbuffer__ to
 * get format string. The new location in the format string is returned.").
 *
 * descr  : dtype whose fields are walked in declaration order (descr->names).
 * f      : cursor into the output format buffer; single-char type codes
 *          ('b','B','h',... see the chain below) and 'x' pad bytes are written here.
 * end    : one-past-the-end of the output buffer, used for overflow checks.
 * offset : in/out running byte offset inside the struct; advanced past padding
 *          and each child's elsize.
 * Returns the new cursor position, or NULL with a Python exception set
 * (the Cython `except NULL` convention — see the __pyx_L1_error path).
 */
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
  PyArray_Descr *__pyx_v_child = 0;
  int __pyx_v_endian_detector;
  int __pyx_v_little_endian;
  PyObject *__pyx_v_fields = 0;
  PyObject *__pyx_v_childname = NULL;
  PyObject *__pyx_v_new_offset = NULL;
  PyObject *__pyx_v_t = NULL;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  long __pyx_t_8;
  char *__pyx_t_9;
  __Pyx_RefNannySetupContext("_util_dtypestring", 0);
  /* pxd:847-848 — runtime endianness probe: read the low byte of an int 1. */
  __pyx_v_endian_detector = 1;
  __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
  /* pxd:851 — `for childname in descr.names:` (names is a tuple; None means
   * the Cython-level iteration would raise, mirroring Python semantics). */
  if (unlikely(__pyx_v_descr->names == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
    __PYX_ERR(2, 851, __pyx_L1_error)
  }
  __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
  for (;;) {
    if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(2, 851, __pyx_L1_error)
    #else
    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 851, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    #endif
    __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
    __pyx_t_3 = 0;
    /* pxd:852 — `fields = descr.fields[childname]` with a type check that the
     * value is a tuple (or None, rejected by the unpack below). */
    if (unlikely(__pyx_v_descr->fields == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(2, 852, __pyx_L1_error)
    }
    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 852, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(2, 852, __pyx_L1_error)
    __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
    __pyx_t_3 = 0;
    /* pxd:853 — `child, new_offset = fields`: unpack exactly 2 items. */
    if (likely(__pyx_v_fields != Py_None)) {
      PyObject* sequence = __pyx_v_fields;
      Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
      if (unlikely(size != 2)) {
        if (size > 2) __Pyx_RaiseTooManyValuesError(2);
        else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
        __PYX_ERR(2, 853, __pyx_L1_error)
      }
      #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
      __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
      __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_4);
      #else
      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 853, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 853, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      #endif
    } else {
      __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 853, __pyx_L1_error)
    }
    /* `child` must be a numpy dtype instance. */
    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(2, 853, __pyx_L1_error)
    __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
    __pyx_t_3 = 0;
    __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
    __pyx_t_4 = 0;
    /* pxd:855-856 — `if (end - f) - <int>(new_offset - offset[0]) < 15:` —
     * conservative check that the remaining buffer can hold pad bytes plus
     * one field code; raise RuntimeError ("Format string allocated too
     * short...", interned as __pyx_tuple__4) otherwise. */
    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 855, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 855, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 855, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
    if (unlikely(__pyx_t_6)) {
      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 856, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(2, 856, __pyx_L1_error)
    }
    /* pxd:858-860 — reject non-native byte order:
     *   (child.byteorder == '>' and little_endian) or
     *   (child.byteorder == '<' and not little_endian)
     * The goto ladder is Cython's short-circuit lowering of the and/or. */
    __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
    if (!__pyx_t_7) {
      goto __pyx_L8_next_or;
    } else {
    }
    __pyx_t_7 = (__pyx_v_little_endian != 0);
    if (!__pyx_t_7) {
    } else {
      __pyx_t_6 = __pyx_t_7;
      goto __pyx_L7_bool_binop_done;
    }
    __pyx_L8_next_or:;
    __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
    if (__pyx_t_7) {
    } else {
      __pyx_t_6 = __pyx_t_7;
      goto __pyx_L7_bool_binop_done;
    }
    __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
    __pyx_t_6 = __pyx_t_7;
    __pyx_L7_bool_binop_done:;
    if (unlikely(__pyx_t_6)) {
      /* raise ValueError("Non-native byte order not supported") — __pyx_tuple__3 */
      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 860, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(2, 860, __pyx_L1_error)
    }
    /* pxd:870-873 — emit 'x' (0x78) pad bytes until offset[0] reaches the
     * field's starting offset. new_offset is a Python int, hence the boxed
     * RichCompare per iteration. */
    while (1) {
      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 870, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 870, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 870, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (!__pyx_t_6) break;
      (__pyx_v_f[0]) = 0x78;
      __pyx_v_f = (__pyx_v_f + 1);
      __pyx_t_8 = 0;
      (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
    }
    /* pxd:875 — offset[0] += child.itemsize (elsize is the C-level itemsize). */
    __pyx_t_8 = 0;
    (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
    /* pxd:877 — leaf field (no sub-fields): emit its single type code. */
    __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
    if (__pyx_t_6) {
      /* pxd:878 — t = child.type_num (boxed so it can be compared and
       * interpolated into the error message below). */
      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 878, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
      __pyx_t_4 = 0;
      /* pxd:879-880 — need room for up to "Zg" + NUL etc. */
      __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
      if (unlikely(__pyx_t_6)) {
        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 880, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
        __Pyx_Raise(__pyx_t_4, 0, 0, 0);
        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
        __PYX_ERR(2, 880, __pyx_L1_error)
      }
      /* pxd:883-899 — map NPY_* type number to struct-format char(s); the
       * generated .pxd note says integers are used (98 == 'b', etc.) to avoid
       * warnings until Cython ticket #99. Complex types emit 'Z' + code and
       * advance f one extra slot. */
      /* t == NPY_BYTE: 'b' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 883, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 883, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 883, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 98;
        goto __pyx_L15;
      }
      /* t == NPY_UBYTE: 'B' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 884, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 884, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 884, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 66;
        goto __pyx_L15;
      }
      /* t == NPY_SHORT: 'h' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 885, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 885, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 885, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x68;
        goto __pyx_L15;
      }
      /* t == NPY_USHORT: 'H' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 886, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 886, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 886, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 72;
        goto __pyx_L15;
      }
      /* t == NPY_INT: 'i' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 887, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 887, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 887, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x69;
        goto __pyx_L15;
      }
      /* t == NPY_UINT: 'I' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 888, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 888, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 888, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 73;
        goto __pyx_L15;
      }
      /* t == NPY_LONG: 'l' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 889, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 889, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 889, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x6C;
        goto __pyx_L15;
      }
      /* t == NPY_ULONG: 'L' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 890, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 890, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 890, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 76;
        goto __pyx_L15;
      }
      /* t == NPY_LONGLONG: 'q' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 891, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 891, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 891, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x71;
        goto __pyx_L15;
      }
      /* t == NPY_ULONGLONG: 'Q' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 892, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 892, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 892, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 81;
        goto __pyx_L15;
      }
      /* t == NPY_FLOAT: 'f' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 893, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 893, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 893, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x66;
        goto __pyx_L15;
      }
      /* t == NPY_DOUBLE: 'd' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 894, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 894, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 894, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x64;
        goto __pyx_L15;
      }
      /* t == NPY_LONGDOUBLE: 'g' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 895, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 895, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 895, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 0x67;
        goto __pyx_L15;
      }
      /* t == NPY_CFLOAT: 'Z' 'f' (two chars; f advanced once more at L15) */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 896, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 896, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 896, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 90;
        (__pyx_v_f[1]) = 0x66;
        __pyx_v_f = (__pyx_v_f + 1);
        goto __pyx_L15;
      }
      /* t == NPY_CDOUBLE: 'Z' 'd' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 897, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 897, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 897, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 90;
        (__pyx_v_f[1]) = 0x64;
        __pyx_v_f = (__pyx_v_f + 1);
        goto __pyx_L15;
      }
      /* t == NPY_CLONGDOUBLE: 'Z' 'g' */
      __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 898, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 898, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 898, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (__pyx_t_6) {
        (__pyx_v_f[0]) = 90;
        (__pyx_v_f[1]) = 0x67;
        __pyx_v_f = (__pyx_v_f + 1);
        goto __pyx_L15;
      }
      /* t == NPY_OBJECT: 'O' */
      __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 899, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 899, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(2, 899, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      if (likely(__pyx_t_6)) {
        (__pyx_v_f[0]) = 79;
        goto __pyx_L15;
      }
      /* pxd:901 — unknown type number:
       * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */
      /*else*/ {
        __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 901, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_3);
        __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 901, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_4);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
        __Pyx_Raise(__pyx_t_4, 0, 0, 0);
        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
        __PYX_ERR(2, 901, __pyx_L1_error)
      }
      __pyx_L15:;
      /* pxd:902 — advance past the code just written. */
      __pyx_v_f = (__pyx_v_f + 1);
      goto __pyx_L13;
    }
    /* pxd:906 — nested structured field: recurse; Cython ignores struct
     * boundary info ("T{...}") so it is not emitted. */
    /*else*/ {
      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(2, 906, __pyx_L1_error)
      __pyx_v_f = __pyx_t_9;
    }
    __pyx_L13:;
  }
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* pxd:907 — return the advanced cursor. */
  __pyx_r = __pyx_v_f;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_child);
  __Pyx_XDECREF(__pyx_v_fields);
  __Pyx_XDECREF(__pyx_v_childname);
  __Pyx_XDECREF(__pyx_v_new_offset);
  __Pyx_XDECREF(__pyx_v_t);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/*
 * Cython-generated from numpy/__init__.pxd:1022, `set_array_base`.
 * Installs `base` as the owner of `arr`'s data.
 */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_array_base", 0);
  /* numpy/__init__.pxd:1023: Py_INCREF(base)  # important to do this before stealing the reference below!
     ...Py_INCREF must come first: PyArray_SetBaseObject steals a reference. */
  Py_INCREF(__pyx_v_base);
  /* pxd:1024 — PyArray_SetBaseObject(arr, base); return value deliberately ignored. */
  (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/*
 * Cython-generated from numpy/__init__.pxd:1026, `get_array_base`.
 * Returns the object owning `arr`'s data (a new reference), or None when the
 * array owns its own data (PyArray_BASE == NULL).
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_v_base;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("get_array_base", 0);
  /* pxd:1027 — base = PyArray_BASE(arr) (borrowed pointer, may be NULL). */
  __pyx_v_base = PyArray_BASE(__pyx_v_arr);
  /* pxd:1028-1029 — if base is NULL: return None */
  __pyx_t_1 = ((__pyx_v_base == NULL) != 0);
  if (__pyx_t_1) {
    __Pyx_XDECREF(__pyx_r);
    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
    goto __pyx_L0;
  }
  /* pxd:1030 — return <object>base (incref turns the borrow into a new ref). */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_base));
  __pyx_r = ((PyObject *)__pyx_v_base);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:1034: cdef inline int import_array() except -1 —
 * cdef inline int import_array() except -1:  (numpy __init__.pxd:1034) */

/* NOTE(review): this file is MACHINE-GENERATED by Cython from a .pyx/.pxd
 * source.  Do not hand-edit; fix the Cython source and regenerate.  The
 * three helpers below are numpy.pxd's standard boilerplate: each calls the
 * NumPy C-API initializer and converts its -1 error return into a Python
 * ImportError (raised via the pre-built message tuples __pyx_tuple__6/__7). */

/* "__init__.pxd":1034  cdef inline int import_array() except -1 */
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  __Pyx_RefNannySetupContext("import_array", 0);

  /* "__init__.pxd":1035  try: */
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    /* save current exception state so the except-clause can restore it */
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* "__init__.pxd":1036  _import_array() */
      __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 1036, __pyx_L3_error)

    /* "__init__.pxd":1035  try-body succeeded */
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L8_try_end;
    __pyx_L3_error:;

    /* "__init__.pxd":1037  except Exception: */
    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
    if (__pyx_t_4) {
      __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 1037, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_GOTREF(__pyx_t_7);

      /* "__init__.pxd":1038  raise ImportError("numpy.core.multiarray failed to import") */
      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 1038, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __PYX_ERR(2, 1038, __pyx_L5_except_error)
    }
    goto __pyx_L5_except_error;
    __pyx_L5_except_error:;

    /* "__init__.pxd":1035  exception escaped: restore saved state and propagate */
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    goto __pyx_L1_error;
    __pyx_L8_try_end:;
  }

  /* "__init__.pxd":1034 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "__init__.pxd":1040  cdef inline int import_umath() except -1 */
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  __Pyx_RefNannySetupContext("import_umath", 0);

  /* "__init__.pxd":1041  try: */
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* "__init__.pxd":1042  _import_umath() */
      __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 1042, __pyx_L3_error)

    /* "__init__.pxd":1041  try-body succeeded */
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L8_try_end;
    __pyx_L3_error:;

    /* "__init__.pxd":1043  except Exception: */
    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
    if (__pyx_t_4) {
      __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 1043, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_GOTREF(__pyx_t_7);

      /* "__init__.pxd":1044  raise ImportError("numpy.core.umath failed to import") */
      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 1044, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __PYX_ERR(2, 1044, __pyx_L5_except_error)
    }
    goto __pyx_L5_except_error;
    __pyx_L5_except_error:;

    /* "__init__.pxd":1041  exception escaped: restore saved state and propagate */
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    goto __pyx_L1_error;
    __pyx_L8_try_end:;
  }

  /* "__init__.pxd":1040 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "__init__.pxd":1046  cdef inline int import_ufunc() except -1
 * NOTE: intentionally calls _import_umath() — ufuncs live in umath, this
 * mirrors the numpy.pxd source, not a copy/paste error in this file. */
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  __Pyx_RefNannySetupContext("import_ufunc", 0);

  /* "__init__.pxd":1047  try: */
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* "__init__.pxd":1048  _import_umath() */
      __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 1048, __pyx_L3_error)

    /* "__init__.pxd":1047  try-body succeeded */
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L8_try_end;
    __pyx_L3_error:;

    /* "__init__.pxd":1049  except Exception: */
    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
    if (__pyx_t_4) {
      __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 1049, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_GOTREF(__pyx_t_7);

      /* "__init__.pxd":1050  raise ImportError("numpy.core.umath failed to import") */
      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 1050, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __PYX_ERR(2, 1050, __pyx_L5_except_error)
    }
    goto __pyx_L5_except_error;
    __pyx_L5_except_error:;

    /* "__init__.pxd":1047  exception escaped: restore saved state and propagate */
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    goto __pyx_L1_error;
    __pyx_L8_try_end:;
  }

  /* "__init__.pxd":1046 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":122
 *   def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 *                 mode="c", bint allocate_buffer=True)
 * Python wrapper: unpacks positional/keyword arguments, applies defaults,
 * type-checks `shape` (tuple) and `format` (not None), then dispatches to
 * the __cinit__ implementation function. */
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_shape = 0;
  Py_ssize_t __pyx_v_itemsize;
  PyObject *__pyx_v_format = 0;
  PyObject *__pyx_v_mode = 0;
  int __pyx_v_allocate_buffer;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
    PyObject* values[5] = {0,0,0,0,0};
    values[3] = ((PyObject *)__pyx_n_s_c);  /* default: mode="c" */
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* collect positional arguments (fall through from highest count) */
      switch (pos_args) {
        case 5:
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* fill remaining slots from keyword arguments; shape/itemsize/format
       * are required, mode/allocate_buffer optional */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[3] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
          if (value) { values[4] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        /* leftover keywords are either duplicates or unknown names */
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
      }
    } else {
      /* no keyword dict: positional-only fast path */
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* convert unpacked PyObjects to the C-level argument types */
    __pyx_v_shape = ((PyObject*)values[0]);
    __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
    __pyx_v_format = values[2];
    __pyx_v_mode = values[3];
    if (values[4]) {
      __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
    } else {

      /* "View.MemoryView":123  default: allocate_buffer=True */
      __pyx_v_allocate_buffer = ((int)1);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  /* declared argument types: shape must be a tuple, format must not be None */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
  if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
  }
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);

  /* "View.MemoryView":122 */
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of cython.array.__cinit__ (View.MemoryView:122-182):
 * validates shape/itemsize/mode, stores format as a bytes object,
 * allocates the shape+strides arrays in one PyObject_Malloc block,
 * fills C- or Fortran-contiguous strides, and (optionally) allocates
 * the data buffer, initializing it with Py_None refs for object dtype.
 * MACHINE-GENERATED Cython output — edit the .pyx, not this file. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize,
PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_v_i;
  Py_ssize_t __pyx_v_dim;
  PyObject **__pyx_v_p;
  char __pyx_v_order;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  char *__pyx_t_7;
  int __pyx_t_8;
  Py_ssize_t __pyx_t_9;
  PyObject *__pyx_t_10 = NULL;
  Py_ssize_t __pyx_t_11;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  __Pyx_INCREF(__pyx_v_format);

  /* "View.MemoryView":129  self.ndim = <int> len(shape) */
  if (unlikely(__pyx_v_shape == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(1, 129, __pyx_L1_error)
  }
  __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
  __pyx_v_self->ndim = ((int)__pyx_t_1);

  /* "View.MemoryView":130  self.itemsize = itemsize */
  __pyx_v_self->itemsize = __pyx_v_itemsize;

  /* "View.MemoryView":132  if not self.ndim: */
  __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
  if (unlikely(__pyx_t_2)) {

    /* "View.MemoryView":133  raise ValueError("Empty shape tuple for cython.array") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 133, __pyx_L1_error)

  /* "View.MemoryView":132 */
  }

  /* "View.MemoryView":135  if itemsize <= 0: */
  __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
  if (unlikely(__pyx_t_2)) {

    /* "View.MemoryView":136  raise ValueError("itemsize <= 0 for cython.array") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 136, __pyx_L1_error)

  /* "View.MemoryView":135 */
  }

  /* "View.MemoryView":138  if not isinstance(format, bytes): */
  __pyx_t_2 = PyBytes_Check(__pyx_v_format);
  __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
  if (__pyx_t_4) {

    /* "View.MemoryView":139  format = format.encode('ASCII') */
    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_6 = NULL;
    /* bound-method fast path: unpack self+function to avoid a tuple alloc */
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
      __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
      if (likely(__pyx_t_6)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
        __Pyx_INCREF(__pyx_t_6);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_5, function);
      }
    }
    __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
    __pyx_t_3 = 0;

  /* "View.MemoryView":138 */
  }

  /* "View.MemoryView":140  self._format = format  (keep a reference to the byte string) */
  if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
  __pyx_t_3 = __pyx_v_format;
  __Pyx_INCREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __Pyx_GOTREF(__pyx_v_self->_format);
  __Pyx_DECREF(__pyx_v_self->_format);
  __pyx_v_self->_format = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "View.MemoryView":141  self.format = self._format  (borrowed char* into _format) */
  if (unlikely(__pyx_v_self->_format == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
    __PYX_ERR(1, 141, __pyx_L1_error)
  }
  __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
  __pyx_v_self->format = __pyx_t_7;

  /* "View.MemoryView":144  one allocation holds both shape and strides (ndim each) */
  __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));

  /* "View.MemoryView":145  self._strides = self._shape + self.ndim */
  __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);

  /* "View.MemoryView":147  if not self._shape: */
  __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
  if (unlikely(__pyx_t_4)) {

    /* "View.MemoryView":148  raise MemoryError("unable to allocate shape and strides.") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 148, __pyx_L1_error)

  /* "View.MemoryView":147 */
  }

  /* "View.MemoryView":151  for idx, dim in enumerate(shape): */
  __pyx_t_8 = 0;
  __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
  for (;;) {
    if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
    #else
    __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    #endif
    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_9;
    __pyx_v_idx = __pyx_t_8;
    __pyx_t_8 = (__pyx_t_8 + 1);

    /* "View.MemoryView":152  if dim <= 0: */
    __pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
    if (unlikely(__pyx_t_4)) {

      /* "View.MemoryView":153  raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */
      __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_GIVEREF(__pyx_t_5);
      PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
      __Pyx_GIVEREF(__pyx_t_6);
      PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
      __pyx_t_5 = 0;
      __pyx_t_6 = 0;
      __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __PYX_ERR(1, 153, __pyx_L1_error)

    /* "View.MemoryView":152 */
    }

    /* "View.MemoryView":154  self._shape[idx] = dim */
    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;

  /* "View.MemoryView":151 */
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

  /* "View.MemoryView":157  if mode == 'fortran': */
  __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
  if (__pyx_t_4) {

    /* "View.MemoryView":158  order = b'F' */
    __pyx_v_order = 'F';

    /* "View.MemoryView":159  self.mode = u'fortran' */
    __Pyx_INCREF(__pyx_n_u_fortran);
    __Pyx_GIVEREF(__pyx_n_u_fortran);
    __Pyx_GOTREF(__pyx_v_self->mode);
    __Pyx_DECREF(__pyx_v_self->mode);
    __pyx_v_self->mode = __pyx_n_u_fortran;

    /* "View.MemoryView":157 */
    goto __pyx_L10;
  }

  /* "View.MemoryView":160  elif mode == 'c': */
  __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
  if (likely(__pyx_t_4)) {

    /* "View.MemoryView":161  order = b'C' */
    __pyx_v_order = 'C';

    /* "View.MemoryView":162  self.mode = u'c' */
    __Pyx_INCREF(__pyx_n_u_c);
    __Pyx_GIVEREF(__pyx_n_u_c);
    __Pyx_GOTREF(__pyx_v_self->mode);
    __Pyx_DECREF(__pyx_v_self->mode);
    __pyx_v_self->mode = __pyx_n_u_c;

    /* "View.MemoryView":160 */
    goto __pyx_L10;
  }

  /* "View.MemoryView":164  raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */
  /*else*/ {
    __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_10);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_Raise(__pyx_t_10, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
    __PYX_ERR(1, 164, __pyx_L1_error)
  }
  __pyx_L10:;

  /* "View.MemoryView":166  self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) */
  __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);

  /* "View.MemoryView":169  self.free_data = allocate_buffer */
  __pyx_v_self->free_data = __pyx_v_allocate_buffer;

  /* "View.MemoryView":170  self.dtype_is_object = format == b'O' */
  __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  __pyx_v_self->dtype_is_object = __pyx_t_4;

  /* "View.MemoryView":171  if allocate_buffer: */
  __pyx_t_4 = (__pyx_v_allocate_buffer != 0);
  if (__pyx_t_4) {

    /* "View.MemoryView":174  self.data = <char *>malloc(self.len) */
    __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));

    /* "View.MemoryView":175  if not self.data: */
    __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
    if (unlikely(__pyx_t_4)) {

      /* "View.MemoryView":176  raise MemoryError("unable to allocate array data.") */
      __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __PYX_ERR(1, 176, __pyx_L1_error)

    /* "View.MemoryView":175 */
    }

    /* "View.MemoryView":178  if self.dtype_is_object: */
    __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_4) {

      /* "View.MemoryView":179  p = <PyObject **> self.data */
      __pyx_v_p = ((PyObject **)__pyx_v_self->data);

      /* "View.MemoryView":180  for i in range(self.len / itemsize):
       * guarded Py_ssize_t division: itemsize==0 and the -1/min overflow
       * case must raise instead of trapping at C level */
      if (unlikely(__pyx_v_itemsize == 0)) {
        PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
        __PYX_ERR(1, 180, __pyx_L1_error)
      }
      else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
        PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
        __PYX_ERR(1, 180, __pyx_L1_error)
      }
      __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize);
      __pyx_t_9 = __pyx_t_1;
      for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
        __pyx_v_i = __pyx_t_11;

        /* "View.MemoryView":181  p[i] = Py_None */
        (__pyx_v_p[__pyx_v_i]) = Py_None;

        /* "View.MemoryView":182  Py_INCREF(Py_None) */
        Py_INCREF(Py_None);
      }

    /* "View.MemoryView":178 */
    }

  /* "View.MemoryView":171 */
  }

  /* "View.MemoryView":122 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_format);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":185  def __getbuffer__(self, Py_buffer *info, int flags) */

/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED
int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | 
PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = 
self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; 
/* NOTE(review): this is C code generated by Cython ("View.MemoryView" utility
 * code). Doc comments only below — behavioral changes belong in the .pyx. */

/* Tail of array.__getbuffer__ (function opens on an earlier line): on the
 * normal exit path, drop the Py_None placeholder stored in info->obj if it
 * was never replaced by a real exporter reference. */
if (__pyx_v_info->obj == Py_None) {
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "View.MemoryView":211
 *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 *
 *     def __dealloc__(array self):             # <<<<<<<<<<<<<<
 *         if self.callback_free_data != NULL:
 *             self.callback_free_data(self.data)
 */

/* Python wrapper */
/* tp_dealloc hook for the `array` helper type: casts the generic PyObject*
 * to the concrete struct and delegates to the implementation below. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Releases the array's storage. A user-supplied callback_free_data takes
 * precedence; otherwise, when this object owns its buffer (free_data), any
 * stored PyObject* elements are DECREF'ed first (format 'O') and the raw
 * buffer is free()d. The _shape allocation is always released. */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__dealloc__", 0);

  /* "View.MemoryView":212: if self.callback_free_data != NULL: */
  __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":213: self.callback_free_data(self.data) */
    __pyx_v_self->callback_free_data(__pyx_v_self->data);

    goto __pyx_L3;
  }

  /* "View.MemoryView":214: elif self.free_data: */
  __pyx_t_1 = (__pyx_v_self->free_data != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":215-217: for object dtype, undo the references held
     * by the stored elements before freeing the underlying storage. */
    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_1) {
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
    }

    /* "View.MemoryView":218: free(self.data) */
    free(__pyx_v_self->data);

  }
  __pyx_L3:;

  /* "View.MemoryView":219: PyObject_Free(self._shape)
   * NOTE(review): presumably _shape is a single allocation that also backs
   * _strides (only _shape is freed) — confirm at the allocation site. */
  PyObject_Free(__pyx_v_self->_shape);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* "View.MemoryView":222
 *     @property
 *     def memview(self):             # <<<<<<<<<<<<<<
 *         return self.get_memview()
 */

/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `memview` property getter: delegates through the vtable to get_memview()
 * below, returning a new memoryview over this array. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":223: return self.get_memview() */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":226
 *     @cname('get_memview')
 *     cdef get_memview(self):             # <<<<<<<<<<<<<<
 *         flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 *         return memoryview(self, flags, self.dtype_is_object)
 */
/* Builds the Python-level call memoryview(self, flags, dtype_is_object),
 * requesting a writable, any-contiguous buffer that carries format info. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_RefNannySetupContext("get_memview", 0);

  /* "View.MemoryView":227: flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */
  __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);

  /* "View.MemoryView":228: return memoryview(self, flags, self.dtype_is_object) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  /* pack the positional 3-tuple (self, flags, dtype_is_object) */
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":230 * return memoryview(self, flags,
self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */

/* Python wrapper */
/* sq_length slot: len(arr) is the extent of the first dimension. */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "View.MemoryView":231: return self._shape[0] */
  __pyx_r = (__pyx_v_self->_shape[0]);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":233
 *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
 *         return getattr(self.memview, attr)
 */

/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Attribute fallback: any attribute not found on the array itself is
 * forwarded to self.memview, i.e. getattr(self.memview, attr). */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  __Pyx_RefNannySetupContext("__getattr__", 0);

  /* "View.MemoryView":234: return getattr(self.memview, attr) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":236
 *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
 *         return self.memview[item]
 */

/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Item access is delegated to the memview property: self.memview[item].
 * (Function body continues on the next original line.) */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = NULL;
  /* Continuation of array.__getitem__ (signature on the previous original
   * line): look up self.memview, then index it with `item`. */
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "View.MemoryView":237: return self.memview[item] */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":239
 *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
 *         self.memview[item] = value
 */

/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Item assignment is delegated to the memview property:
 * self.memview[item] = value. Returns 0 on success, -1 on error. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "View.MemoryView":240: self.memview[item] = value */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 *     def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *         raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */

/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* array.__reduce_cython__: pickling is unsupported; always raises TypeError.
 * (Body continues on the next original line.) */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /*
"(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject 
*__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) 
!= 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
__Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = 
buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, 
__pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = 
__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; 
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * 
state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 
2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
__pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def 
__setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void 
*)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint 
dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * 
(<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = 
(self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * global 
__pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":381 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":384 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = 
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":383 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":385 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":387 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in 
range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":391 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 393, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if 
(__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 393, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":394 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"View.MemoryView":396 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return 
self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":401 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":403 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 403, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 403, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = 
__Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 406, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":407 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":409 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 409, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":410 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
__Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ 
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 414, __pyx_L1_error) /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":416 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 416, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 416, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = 
__Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 418, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":419 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 420, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":421 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":423 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 423, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 423, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":425 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ 
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":431 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 431, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj 
= memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":432 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 432, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":433 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":435 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj 
*__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":442 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 442, __pyx_L1_error) /* "View.MemoryView":443 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* 
"View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = 
NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":447 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":452 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":455 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":457 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 457, __pyx_L1_error) /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":458 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, 
&tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":460 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":462 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":466 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":471 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * 
slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 471, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":472 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":475 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); 
__Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":478 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 478, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":479 * cdef setitem_indexed(self, index, value): * cdef char *itemp 
= self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":484 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes 
bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":487 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":489 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = 
__Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":494 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); 
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":495 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":490 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to 
convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 491, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj 
*__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":500 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 500, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":506 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 506, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":508 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 508, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 510, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + 
PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":514 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { 
int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; __PYX_ERR(1, 516, __pyx_L1_error) /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":521 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":526 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":528 * info.strides = NULL * * if flags & 
PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":529 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":528 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":531 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":534 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":536 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":538 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":539 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* 
"View.MemoryView":540 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":541 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":542 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":543 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":514 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 550, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":551 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 551, __pyx_L1_error) /* "View.MemoryView":552 * cdef _memoryviewslice result = memoryview_copy(self) * 
transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def 
shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; 
goto __pyx_L0; /* "View.MemoryView":559 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":566 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 566, __pyx_L1_error) /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":568 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":571 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if 
self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__19, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim 
* */ } /* "View.MemoryView":575 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":571 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
__Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 
* @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":592 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":594 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":595 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":597 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":591 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":599 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":603 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":605 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":609 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct 
__pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":612 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject 
*__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":618 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":619 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":624 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":625 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 625, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def 
is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":629 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":631 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":632 * * slice_copy(self, 
&mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 632, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":637 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 637, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); 
/* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":641 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":643 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":644 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 644, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":649 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & 
~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def 
__setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree 
fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":654 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); 
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":655 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":656 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; 
__Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":660 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":667 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":668 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":667 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":670 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":672 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":673 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":674 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 675, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 675, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == 
__pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__22); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":679 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":681 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__22); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 681, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":682 * else: 
* result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":685 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 685, __pyx_L1_error) /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":687 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # 
<<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":688 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 688, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":690 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 690, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":692 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__22); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":694 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":697 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__23, NULL); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(1, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 699, __pyx_L1_error) /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int 
__pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":707 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":714 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":718 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 718, __pyx_L1_error) } } #endif /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":721 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 721, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":722 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = 
&memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":724 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":725 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":731 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":732 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":737 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":738 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 742, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 742, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 742, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":747 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ 
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) /* "View.MemoryView":744 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 744, __pyx_L1_error) /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":751 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":752 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":753 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":754 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * 
start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":756 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":757 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":758 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not 
None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 758, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 758, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":760 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":761 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":762 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":764 
* have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 764, __pyx_L1_error) /* "View.MemoryView":770 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":774 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) } /* "View.MemoryView":775 * return 
memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 775, __pyx_L1_error) } /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 773, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":779 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) 
__PYX_ERR(1, 778, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":826 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start 
= (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 828, __pyx_L1_error) /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":831 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":834 * * if 
have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 834, __pyx_L1_error) /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* 
"View.MemoryView":844 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":846 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":851 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ 
__pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":862 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":864 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* 
"View.MemoryView":871 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":874 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":877 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":880 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":881 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":882 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = 
(((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":888 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":895 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":896 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 895, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":898 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":900 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t 
shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":908 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":909 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":913 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 913, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 913, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":914 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = 
view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":916 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":917 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":922 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* 
"View.MemoryView":924 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 924, __pyx_L1_error) /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":927 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 927, __pyx_L1_error) /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":929 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":931 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":933 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
__pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":940 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":942 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":943 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":947 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":948 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":949 * for i in range(ndim / 2): * j = 
ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":950 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":953 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 953, __pyx_L1_error) /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":955 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* 
"View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":973 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if 
self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":977 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":979 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":983 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 983, __pyx_L1_error) /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":985 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":989 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ 
/* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default 
__reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1004 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1009 * * * result = _memoryviewslice(None, 0, dtype_is_object) # 
<<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1011 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1012 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1014 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1015 * * result.from_object = (<memoryview> memviewslice.memview).base * 
result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1017 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1018 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1019 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1020 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1021 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # 
<<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1026 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1028 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1029 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1032 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1033 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1035 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1036 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1038 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1039 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1040 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1040, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1042 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1043 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1045 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1052 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1052, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1053 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1055 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1056 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1048 * * 
@cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1063 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1064 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1065 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1067 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * 
dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1068 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1070 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1071 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1072 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1073 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef 
memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 *     "Create a new memoryview object"
 *     cdef __Pyx_memviewslice memviewslice
 */

/* NOTE(review): Cython-generated code (View.MemoryView section). Do not edit by
 * hand — regenerate from the .pyx source instead; manual changes will be lost. */

/* Create a new memoryview object that is a copy of `memview`.
 * Implements View.MemoryView:1076 `memoryview_copy(memoryview memview)`:
 * snapshots the slice via slice_copy(), then builds a fresh object from it.
 * Returns a new reference, or 0 (NULL) on error with an exception set. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
  __Pyx_memviewslice __pyx_v_memviewslice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("memoryview_copy", 0);

  /* View.MemoryView:1079 — slice_copy(memview, &memviewslice) */
  __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));

  /* View.MemoryView:1080 — return memoryview_copy_from_slice(memview, &memviewslice) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1080, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Create a new memoryview object from a given memoryview object and slice.
 * Implements View.MemoryView:1083 `memoryview_copy_from_slice(...)`.
 * If `memview` is a _memoryviewslice, its to_object/to_dtype conversion
 * function pointers are propagated to the new object; otherwise NULL is used.
 * Returns a new reference, or 0 (NULL) on error with an exception set. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
  PyObject *(*__pyx_v_to_object_func)(char *);
  int (*__pyx_v_to_dtype_func)(char *, PyObject *);
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *(*__pyx_t_3)(char *);
  int (*__pyx_t_4)(char *, PyObject *);
  PyObject *__pyx_t_5 = NULL;
  __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);

  /* View.MemoryView:1090 — if isinstance(memview, _memoryviewslice): */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* carry over the dtype conversion callbacks from the source slice */
    __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
    __pyx_v_to_object_func = __pyx_t_3;
    __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
    __pyx_v_to_dtype_func = __pyx_t_4;
    goto __pyx_L3;
  }
  /*else*/ {
    /* plain memoryview: no conversion callbacks */
    __pyx_v_to_object_func = NULL;
    __pyx_v_to_dtype_func = NULL;
  }
  __pyx_L3:;

  /* View.MemoryView:1097 — return memoryview_fromslice(memviewslice[0], ...) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1097, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Absolute value of a Py_ssize_t (View.MemoryView:1105, `nogil` helper). */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  Py_ssize_t __pyx_r;
  int __pyx_t_1;

  /* if arg < 0: return -arg */
  __pyx_t_1 = ((__pyx_v_arg < 0) != 0);
  if (__pyx_t_1) {
    __pyx_r = (-__pyx_v_arg);
    goto __pyx_L0;
  }
  /*else*/ {
    __pyx_r = __pyx_v_arg;
    goto __pyx_L0;
  }

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1112
 *
 * @cname('__pyx_get_best_slice_order')
 * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
 *     """
 *     Figure out the best memory access order for a given slice.
 */

/* NOTE(review): Cython-generated code (View.MemoryView section). Do not edit by
 * hand — regenerate from the .pyx source instead; manual changes will be lost. */

/* Figure out the best memory access order for a given slice
 * (View.MemoryView:1112). Scans for the stride of the last (c_stride) and
 * first (f_stride) dimension with extent > 1 and returns 'C' when
 * |c_stride| <= |f_stride|, else 'F'. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  int __pyx_v_i;
  Py_ssize_t __pyx_v_c_stride;
  Py_ssize_t __pyx_v_f_stride;
  char __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;

  __pyx_v_c_stride = 0;
  __pyx_v_f_stride = 0;

  /* last dimension (ndim-1 .. 0) with extent > 1 gives the C-order stride */
  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
    __pyx_v_i = __pyx_t_1;
    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
    if (__pyx_t_2) {
      __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
      goto __pyx_L4_break;
    }
  }
  __pyx_L4_break:;

  /* first dimension (0 .. ndim-1) with extent > 1 gives the F-order stride */
  __pyx_t_1 = __pyx_v_ndim;
  __pyx_t_3 = __pyx_t_1;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_i = __pyx_t_4;
    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
    if (__pyx_t_2) {
      __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
      goto __pyx_L7_break;
    }
  }
  __pyx_L7_break:;

  /* smaller innermost stride wins; ties go to 'C' */
  __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
  if (__pyx_t_2) {
    __pyx_r = 'C';
    goto __pyx_L0;
  }
  /*else*/ {
    __pyx_r = 'F';
    goto __pyx_L0;
  }

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* Recursive strided copy (View.MemoryView:1136, @cython.cdivision).
 * Copies `dst_shape`-many elements per level from src to dst. At ndim == 1
 * it uses a single memcpy when both strides are positive and equal to
 * itemsize (contiguous fast path), otherwise an element-by-element memcpy;
 * for ndim > 1 it recurses over the first dimension. */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides,
                                     char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides,
                                     Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape,
                                     int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
  Py_ssize_t __pyx_v_dst_extent;
  Py_ssize_t __pyx_v_src_stride;
  Py_ssize_t __pyx_v_dst_stride;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;

  __pyx_v_src_extent = (__pyx_v_src_shape[0]);
  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
  __pyx_v_src_stride = (__pyx_v_src_strides[0]);
  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);

  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {
    /* short-circuit evaluation of:
     * src_stride > 0 and dst_stride > 0 and
     * <size_t> src_stride == itemsize == <size_t> dst_stride */
    __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
    if (__pyx_t_2) {
      __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
    }
    __pyx_t_3 = (__pyx_t_2 != 0);
    __pyx_t_1 = __pyx_t_3;
    __pyx_L5_bool_binop_done:;

    if (__pyx_t_1) {
      /* both sides contiguous: one bulk memcpy */
      (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
      goto __pyx_L4;
    }
    /*else*/ {
      /* strided 1-D copy, one item at a time */
      __pyx_t_4 = __pyx_v_dst_extent;
      __pyx_t_5 = __pyx_t_4;
      for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
        __pyx_v_i = __pyx_t_6;
        (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
    __pyx_L4:;
    goto __pyx_L3;
  }
  /*else*/ {
    /* ndim > 1: recurse into the remaining dimensions */
    __pyx_t_4 = __pyx_v_dst_extent;
    __pyx_t_5 = __pyx_t_4;
    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
      __pyx_v_i = __pyx_t_6;
      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1),
                               __pyx_v_dst_data, (__pyx_v_dst_strides + 1),
                               (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1),
                               (__pyx_v_ndim - 1), __pyx_v_itemsize);
      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
    }
  }
  __pyx_L3:;

  /* function exit code */
}

/* Thin wrapper (View.MemoryView:1166): unpacks the two slice structs and
 * delegates to _copy_strided_to_strided. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);

  /* function exit code */
}

/* Return the size of the memory occupied by the slice in number of bytes
 * (View.MemoryView:1173): itemsize times the product of all extents. */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  int __pyx_v_i;
  Py_ssize_t __pyx_v_size;
  Py_ssize_t __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;

  /* size starts at the item size ... */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_size = __pyx_t_1;

  /* ... and is multiplied by every extent */
  __pyx_t_2 = __pyx_v_ndim;
  __pyx_t_3 = __pyx_t_2;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_i = __pyx_t_4;
    __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
  }

  __pyx_r = __pyx_v_size;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* Fill `strides` for a contiguous array of the given `shape`
 * (View.MemoryView:1184). order == 'F' fills front-to-back, anything else
 * back-to-front (C order); `stride` is the innermost step (typically
 * itemsize). Returns the accumulated total stride, i.e. the full size in
 * bytes of the described block. */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;

  __pyx_t_1 = ((__pyx_v_order == 'F') != 0);
  if (__pyx_t_1) {
    /* Fortran order: stride grows from the first dimension outward */
    __pyx_t_2 = __pyx_v_ndim;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_idx = __pyx_t_4;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
    goto __pyx_L3;
  }
  /*else*/ {
    /* C order: stride grows from the last dimension outward */
    for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
      __pyx_v_idx = __pyx_t_2;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
  }
  __pyx_L3:;

  __pyx_r = __pyx_v_stride;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* Copy the slice's data into a freshly malloc'd contiguous buffer of the
 * given `order` and describe it in `tmpslice` (View.MemoryView:1205).
 * Raises MemoryError (via _err) if malloc fails. Dimensions of extent 1 get
 * stride 0 in tmpslice. Uses one memcpy when the source is already
 * contiguous in `order`, else a strided copy. Returns the buffer, or NULL
 * on error with an exception set; caller owns (and must free) the buffer. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;

  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);

  __pyx_v_result = malloc(__pyx_v_size);
  /* malloc failure -> raise MemoryError through the with-gil helper */
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error)
  }

  /* describe the temp buffer: same shape as src, no suboffsets */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }

  /* contiguous strides for the requested order */
  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));

  /* broadcastable: extent-1 dimensions get stride 0 */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
    }
  }

  /* bulk copy when src is already contiguous in `order`, else strided copy */
  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;

  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* function exit code: nogil function, so the GIL is acquired just to
   * record the traceback */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}

/* Raise ValueError("got differing extents in dimension %d (got %d and %d)")
 * (View.MemoryView:1248, `except -1 with gil`). Always returns -1 with an
 * exception set; acquires the GIL for the duration. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);

  /* build the (i, extent1, extent2) argument tuple */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1251, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;

  /* format the message and raise ValueError */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __PYX_ERR(1, 1250, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}

/* Raise error(msg.decode('ascii') % dim) (View.MemoryView:1254,
 * `except -1 with gil`): decode the ASCII message, %-format it with `dim`,
 * call the given exception type/callable and raise the result. Always
 * returns -1 with an exception set; acquires the GIL for the duration. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);

  /* msg.decode('ascii') % dim */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

  /* call error(...) — unwrap a bound method if necessary, then raise */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1255, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 1255, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}

/*
"View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1260 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1260, __pyx_L1_error) /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1262 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1262, __pyx_L1_error) } /* "View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice 
__pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1273 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1274 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1276 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1277 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1278 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1281 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1282 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1281 * cdef 
__Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1286 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1288 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * 
src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1292 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1294 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", 
i) * */ } } /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1304 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1305 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1310 * * * 
if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1311 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1310 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1317 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * 
refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1318 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1319 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1326 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) 
__PYX_ERR(1, 1326, __pyx_L1_error) /* "View.MemoryView":1327 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1327, __pyx_L1_error) /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1329 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1330 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1331 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1333 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1334 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save 
= __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1341 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1343 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1344 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1345 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1346 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = 
mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1348 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1349 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1350 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1351 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1364 * * if dtype_is_object: * 
refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1371 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1374 * * 
@cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1378 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1381 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1383 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto 
__pyx_L5; } /* "View.MemoryView":1385 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1386 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1388 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1374 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1397 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1398 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, 
__pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1400 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1408 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1409 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1412 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = 
__pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1413 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1414 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1416 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1417 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1419 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ 
/* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % 
__pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* 
function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef 
__pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, 
__pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, 
__pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator 
__pyx_vtable_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator;
/* NOTE(review): Cython-generated type-object machinery for
   dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator.
   Do not hand-edit; regenerate from the .pyx source. Comments only
   were added below — all code tokens are unchanged. */

/* tp_new slot: allocates the instance (falling back to
   PyBaseObject_Type.tp_new for abstract types), installs the vtable
   pointer, and NULL-initializes the data/memview fields of the four
   memoryview-slice members (intervals, z_temp, z_half, dz_dt). */
static PyObject *__pyx_tp_new_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)o);
  p->__pyx_vtab = __pyx_vtabptr_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator;
  p->intervals.data = NULL;
  p->intervals.memview = NULL;
  p->z_temp.data = NULL;
  p->z_temp.memview = NULL;
  p->z_half.data = NULL;
  p->z_half.memview = NULL;
  p->dz_dt.data = NULL;
  p->dz_dt.memview = NULL;
  return o;
}
/* tp_dealloc slot: gives tp_finalize a chance to resurrect the object
   (when built with CYTHON_USE_TP_FINALIZE), releases the four
   memoryview-slice members via __PYX_XDEC_MEMVIEW, then frees the
   instance through tp_free. */
static void __pyx_tp_dealloc_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator(PyObject *o) {
  struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *p = (struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *)o;
#if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
#endif
  __PYX_XDEC_MEMVIEW(&p->intervals, 1);
  __PYX_XDEC_MEMVIEW(&p->z_temp, 1);
  __PYX_XDEC_MEMVIEW(&p->z_half, 1);
  __PYX_XDEC_MEMVIEW(&p->dz_dt, 1);
  (*Py_TYPE(o)->tp_free)(o);
}
/* Method table: exposes forward_integrate plus the pickle helpers
   (the initializer is completed on the following source line). */
static PyMethodDef __pyx_methods_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator[] = {
  {"forward_integrate", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_3forward_integrate, METH_VARARGS|METH_KEYWORDS,
__pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_2forward_integrate}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_5__reduce_cython__, METH_NOARGS, __pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_4__reduce_cython__}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_7__setstate_cython__, METH_O, __pyx_doc_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_6__setstate_cython__}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator = { PyVarObject_HEAD_INIT(0, 0) "dapy.integrators.implicitmidpoint.ImplicitMidpointIntegrator", /*tp_name*/ sizeof(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ "ImplicitMidpointIntegrator(int dim_z, double dt, double tol, int max_iters, int n_threads=4)", /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 
__pyx_pw_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_1__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); 
} else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "dapy.integrators.implicitmidpoint.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, 
/*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); 
Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "dapy.integrators.implicitmidpoint.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, 
/*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct 
__pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return 
__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ 
}; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "dapy.integrators.implicitmidpoint.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, 
/*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return 
__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "dapy.integrators.implicitmidpoint._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 
0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_implicitmidpoint(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_implicitmidpoint}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "implicitmidpoint", __pyx_k_Implicit_mid_point_integrator_fo, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, 
sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_ConvergenceError, __pyx_k_ConvergenceError, sizeof(__pyx_k_ConvergenceError), 0, 0, 1, 1}, {&__pyx_kp_u_Convergence_error_in_implicit_mi, __pyx_k_Convergence_error_in_implicit_mi, sizeof(__pyx_k_Convergence_error_in_implicit_mi), 0, 1, 0, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImplicitMidpointIntegrator, __pyx_k_ImplicitMidpointIntegrator, sizeof(__pyx_k_ImplicitMidpointIntegrator), 0, 0, 1, 1}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0x85, __pyx_k_Incompatible_checksums_s_vs_0x85, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x85), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, 
sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_kp_s_Raised_when_implicit_integrator, __pyx_k_Raised_when_implicit_integrator, sizeof(__pyx_k_Raised_when_implicit_integrator), 0, 0, 1, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, 
__pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dapy_integrators_implicitmidpoin, __pyx_k_dapy_integrators_implicitmidpoin, sizeof(__pyx_k_dapy_integrators_implicitmidpoin), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dim_z, __pyx_k_dim_z, sizeof(__pyx_k_dim_z), 0, 0, 1, 1}, {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, {&__pyx_n_u_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 1, 0, 1}, {&__pyx_n_s_dt, __pyx_k_dt, sizeof(__pyx_k_dt), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_u_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 1, 0, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, 
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_max_iters, __pyx_k_max_iters, sizeof(__pyx_k_max_iters), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, {&__pyx_n_s_n_steps, __pyx_k_n_steps, sizeof(__pyx_k_n_steps), 0, 0, 1, 1}, {&__pyx_n_s_n_threads, __pyx_k_n_threads, sizeof(__pyx_k_n_threads), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, 
sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_ImplicitMidpointI, __pyx_k_pyx_unpickle_ImplicitMidpointI, sizeof(__pyx_k_pyx_unpickle_ImplicitMidpointI), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_start_time_index, __pyx_k_start_time_index, sizeof(__pyx_k_start_time_index), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 
1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tol, __pyx_k_tol, sizeof(__pyx_k_tol), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_z_particles, __pyx_k_z_particles, sizeof(__pyx_k_z_particles), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 41, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(2, 272, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(2, 856, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if 
(!__pyx_builtin_ImportError) __PYX_ERR(2, 1038, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 400, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 609, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 828, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(2, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, 
__pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(2, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(2, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ 
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(2, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../miniconda3/envs/dapy3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(2, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":491 * result = 
struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "View.MemoryView":566 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__19 = PyTuple_New(1); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__19, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__19); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no 
default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__22)) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial 
__cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_ImplicitMidpointIntegrator(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_ImplicitMidpointI, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(1, 1, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); 
__Pyx_GIVEREF(__pyx_tuple__29); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__32); __Pyx_GIVEREF(__pyx_tuple__32); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__33 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); 
return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_139854930 = PyInt_FromLong(139854930L); if (unlikely(!__pyx_int_139854930)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator = &__pyx_vtable_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator; __pyx_vtable_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.update_dz_dt = (void (*)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, __Pyx_memviewslice, double, __Pyx_memviewslice))__pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_update_dz_dt; __pyx_vtable_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.implicit_midpoint_step = (int (*)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, __Pyx_memviewslice, double, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice))__pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_implicit_midpoint_step; __pyx_vtable_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.partition_particles = (void (*)(struct __pyx_obj_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator *, int))__pyx_f_4dapy_11integrators_16implicitmidpoint_26ImplicitMidpointIntegrator_partition_particles; if (PyType_Ready(&__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator) < 0) __PYX_ERR(0, 12, __pyx_L1_error) __pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && 
likely(!__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.tp_dictoffset && __pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator.tp_dict, __pyx_vtabptr_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator) < 0) __PYX_ERR(0, 12, __pyx_L1_error) if (PyObject_SetAttr(__pyx_m, __pyx_n_s_ImplicitMidpointIntegrator, (PyObject *)&__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator) < 0) __PYX_ERR(0, 12, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator) < 0) __PYX_ERR(0, 12, __pyx_L1_error) __pyx_ptype_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator = &__pyx_type_4dapy_11integrators_16implicitmidpoint_ImplicitMidpointIntegrator; __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if 
(__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; 
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(2, 206, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(2, 229, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(2, 233, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(2, 242, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(2, 918, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC 
PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initimplicitmidpoint(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initimplicitmidpoint(void) #else __Pyx_PyMODINIT_FUNC PyInit_implicitmidpoint(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_implicitmidpoint(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = 
PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_implicitmidpoint(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; static PyThread_type_lock __pyx_t_5[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'implicitmidpoint' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_implicitmidpoint(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("implicitmidpoint", __pyx_methods, __pyx_k_Implicit_mid_point_integrator_fo, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_dapy__integrators__implicitmidpoint) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "dapy.integrators.implicitmidpoint")) { if (unlikely(PyDict_SetItemString(modules, "dapy.integrators.implicitmidpoint", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "dapy/integrators/implicitmidpoint.pyx":5 * cimport cython * from cython.parallel import prange * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * from libc.math cimport isnan, isinf, fabs */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; /* "dapy/integrators/implicitmidpoint.pyx":9 * from libc.math cimport isnan, isinf, fabs * * class ConvergenceError(Exception): # <<<<<<<<<<<<<< * """Raised when implicit integrator step fails to converge.""" * */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); __Pyx_GIVEREF(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_t_1, __pyx_n_s_ConvergenceError, __pyx_n_s_ConvergenceError, (PyObject *) NULL, __pyx_n_s_dapy_integrators_implicitmidpoin, __pyx_kp_s_Raised_when_implicit_integrator); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_ConvergenceError, __pyx_t_1, __pyx_t_3, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ConvergenceError, __pyx_t_4) < 0) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":1 * def __pyx_unpickle_ImplicitMidpointIntegrator(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4dapy_11integrators_16implicitmidpoint_1__pyx_unpickle_ImplicitMidpointIntegrator, NULL, __pyx_n_s_dapy_integrators_implicitmidpoin); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_ImplicitMidpointI, __pyx_t_1) 
< 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "dapy/integrators/implicitmidpoint.pyx":1 * """Implicit mid-point integrator for ordinary differential equations.""" # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] 
__pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_5[0] = PyThread_allocate_lock(); __pyx_t_5[1] = PyThread_allocate_lock(); __pyx_t_5[2] = PyThread_allocate_lock(); __pyx_t_5[3] = PyThread_allocate_lock(); __pyx_t_5[4] = PyThread_allocate_lock(); __pyx_t_5[5] = PyThread_allocate_lock(); __pyx_t_5[6] = PyThread_allocate_lock(); __pyx_t_5[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_5, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, 
__pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init dapy.integrators.implicitmidpoint", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init dapy.integrators.implicitmidpoint"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } 
return result;
}
#endif

/* MemviewSliceInit */
/* Initialize a flat C-level memoryview slice struct from the Py_buffer held
 * by `memview`.  Shape/strides/suboffsets are copied into `memviewslice` so
 * generated code can index the buffer without touching the Python object.
 * `memview_is_new_reference` is nonzero when the caller already owns a fresh
 * reference to `memview` (suppresses the extra Py_INCREF below).
 * Returns 0 on success, -1 on failure with a Python exception set. */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);

    /* Refuse to re-initialize a slice that already points at a buffer. */
    if (memviewslice->memview || memviewslice->data) {
        PyErr_SetString(PyExc_ValueError,
            "memviewslice is already initialized!");
        goto fail;
    }

    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* No explicit strides: derive them from itemsize and shape,
         * innermost dimension first (C-contiguous layout). */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }

    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            /* -1 marks "no indirection" for this dimension. */
            memviewslice->suboffsets[i] = -1;
        }
    }

    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    /* First acquisition of an already-owned memoryview: take an extra
     * reference so the buffer owner stays alive while the slice is used. */
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;

fail:
    /* Leave the slice in a clean, uninitialized state on error. */
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
/* printf-style fatal-error helper: formats into a fixed buffer and aborts
 * the process via Py_FatalError (never returns). */
static void __pyx_fatalerror(const char *fmt, ...)
Py_NO_RETURN {
    va_list vargs;
    char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    /* Fixed 200-byte buffer: vsnprintf truncates overlong messages safely. */
    vsnprintf(msg, 200, fmt, vargs);
    va_end(vargs);
    Py_FatalError(msg);
}
/* Post-increment the slice acquisition count while holding `lock`;
 * returns the value BEFORE the increment. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}
/* Counterpart of the above: post-decrement under `lock`; returns the value
 * BEFORE the decrement. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return result;
}
/* Acquire a memoryview slice: bump its acquisition count and, on the first
 * acquisition, take a Python reference to the owning memoryview object.
 * `have_gil` says whether the caller already holds the GIL; if not, it is
 * taken temporarily around Py_INCREF. */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return;  /* null / None slice: nothing to acquire */
    if (__pyx_get_slice_count(memview) < 0)
        /* Negative count means the bookkeeping is corrupt; abort loudly. */
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release a memoryview slice (tolerates NULL): decrement the acquisition
 * count and drop the Python reference when the last acquisition goes away. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        /* None placeholder: just clear the pointer, no refcounting. */
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        /* Releasing more often than acquired: bookkeeping bug, abort. */
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        /* Last acquisition gone: release the owning memoryview (taking the
         * GIL first if the caller does not hold it). */
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
memslice->memview = NULL; } } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = 
PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; 
PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call `func` with the single argument `arg`, dispatching to the fastest
 * available calling convention (fast Python call, METH_O, METH_FASTCALL)
 * before falling back to a packed 1-tuple call. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            /* METH_O takes exactly one argument: no tuple needed. */
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    /* Generic fallback: pack `arg` into a tuple and call. */
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython build: always go through a packed 1-tuple call. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif

/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* Set the thread's current exception to (type, value, tb), STEALING the
 * references, and release whatever exception was previously current. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    /* Drop the old exception only after the new one is fully installed. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Move the thread's current exception into *type/*value/*tb (ownership
 * transfers to the caller) and clear it from the thread state. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif

/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of `raise type, value, tb`. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value ==
Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else 
args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 
1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, 
(PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { if (unlikely(PyTuple_Check(key))) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) { PyErr_SetObject(PyExc_KeyError, args); Py_DECREF(args); } } else { PyErr_SetObject(PyExc_KeyError, key); } } return NULL; } Py_INCREF(value); return value; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s");
}

/* RaiseNoneIterError */
/* Raise the standard TypeError used when None is unpacked/iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}

/* ExtTypeTest */
/* Return 1 if `obj` is an instance of `type`; otherwise set a TypeError
 * (or SystemError when `type` itself is missing) and return 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}

/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread's exc_info stack down to the innermost frame that still
 * holds an exception, skipping cleared (NULL/None) entries. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif

/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copy the currently-handled exception triple into *type/*value/*tb,
 * returning NEW references; the thread state itself is left untouched. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install (type, value, tb) as the currently-handled exception, STEALING
 * the references; the previously-held triple is released afterwards. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject 
*obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
/* NOTE(review): this is C support code auto-generated by Cython; do not
 * hand-edit — regenerate from the .pyx source instead.
 * The first tokens below are the tail of a unicode-equality helper whose
 * head lies before this chunk. */
return_ne;
    }
    data1 = __Pyx_PyUnicode_DATA(s1);
    data2 = __Pyx_PyUnicode_DATA(s2);
    /* Cheap first-code-point compare before paying for a full memcmp. */
    if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
        goto return_ne;
    } else if (length == 1) {
        goto return_eq;
    } else {
        int result = memcmp(data1, data2, (size_t)(length * kind));
        #if PY_MAJOR_VERSION < 3
        Py_XDECREF(owned_ref);
        #endif
        return (equals == Py_EQ) ? (result == 0) : (result != 0);
    }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        /* Mixed or non-unicode operands: defer to the generic protocol. */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        #if PY_MAJOR_VERSION < 3
        Py_XDECREF(owned_ref);
        #endif
        if (!py_result) return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}

/* GetItemInt */
/* Slow path for integer indexing: uses a boxed index with PyObject_GetItem.
   Consumes (decrefs) the reference to j before returning. */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}
/* list[i] with optional wraparound/bounds checking; uses the borrowing
   CPython list macros when known safe, else defers to PySequence_GetItem. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                          CYTHON_NCP_UNUSED int wraparound,
                                                          CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* tuple[i] — same contract as the list variant above. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           CYTHON_NCP_UNUSED int wraparound,
                                                           CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Dispatcher for o[i]: specialises on exact list/tuple, then on the
   sq_item slot (with manual wraparound), else falls back to a boxed index. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* sq_length may legitimately raise OverflowError for huge
                       sequences; anything else is a real error. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}

/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
/* obj[index] for index-like keys: coerce via __index__ then take the
   integer fast path.  Head only — the body continues past this chunk. */
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
    PyObject *runerr;
    Py_ssize_t key_value;
    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
    if (unlikely(!(m && m->sq_item))) {
        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
        return NULL;
    }
    key_value = __Pyx_PyIndex_AsSsize_t(index);
    if (likely(key_value != -1 || !(runerr =
/* (continuation of __Pyx_PyObject_GetIndex — its head lies in the previous chunk) */
PyErr_Occurred()))) {
        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
    }
    /* __index__ overflowed Py_ssize_t: report a proper IndexError. */
    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
        PyErr_Clear();
        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
    }
    return NULL;
}
/* Generic obj[key]: prefer the mapping slot, fall back to sequence indexing. */
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
    PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
    if (likely(m && m->mp_subscript)) {
        return m->mp_subscript(obj, key);
    }
    return __Pyx_PyObject_GetIndex(obj, key);
}
#endif

/* decode_c_string */
/* Decode cstring[start:stop] to a Python unicode object.
   Negative bounds are interpreted Python-style relative to strlen(cstring);
   decode_func (if non-NULL) overrides the (encoding, errors) pair. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        size_t slen = strlen(cstring);
        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
            PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python");
            return NULL;
        }
        length = (Py_ssize_t) slen;
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    /* Empty (or inverted) slice decodes to the empty unicode string. */
    if (unlikely(length <= 0))
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}

/* SwapException */
#if CYTHON_FAST_THREAD_STATE
/* Swap the thread's "currently handled exception" triple with
   *type/*value/*tb.  Head only — the tail continues in the next chunk. */
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    /* Py3.7+: exception state lives on the thread's exc_info stack. */
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = *type;
    exc_info->exc_value = *value;
    exc_info->exc_traceback = *tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
/* (tail of __Pyx__ExceptionSwap — its head lies in the previous chunk) */
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
    #endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable fallback: swap via the public PyErr_{Get,Set}ExcInfo API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif

/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Walk tp_base links: is b an ancestor of a?  (object is everyone's base) */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    while (a) {
        a = a->tp_base;
        if (a == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}
/* Subtype test: scan the MRO tuple when available, else the tp_base chain. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b) return 1;
    mro = a->tp_mro;
    if (likely(mro)) {
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(mro);
        for (i = 0; i < n; i++) {
            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
                return 1;
        }
        return 0;
    }
    return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
/* Py2: PyObject_IsSubclass may run arbitrary __subclasscheck__ code, so the
   live exception state is saved and restored around the checks. */
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
    PyObject *exception, *value, *tb;
    int res;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&exception, &value, &tb);
    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
    if (unlikely(res == -1)) {
        PyErr_WriteUnraisable(err);
        res = 0;
    }
    if (!res) {
        res = PyObject_IsSubclass(err, exc_type2);
        if (unlikely(res == -1)) {
            PyErr_WriteUnraisable(err);
            res = 0;
        }
    }
    __Pyx_ErrRestore(exception, value, tb);
    return res;
}
#else
/* Py3: exception classes have regular MROs; a direct subtype scan suffices. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
    if (!res) {
        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
    return res;
}
#endif
/* Match exc_type against every member of a tuple of exception classes. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    /* Identity pre-pass: the common case is an exact class in the tuple. */
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
        #if PY_MAJOR_VERSION < 3
        if (likely(exc_type == t)) return 1;
        #endif
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
        }
    }
    return 0;
}
/* Fast replacement for PyErr_GivenExceptionMatches (single class or tuple). */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (likely(PyExceptionClass_Check(exc_type))) {
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        } else if (likely(PyTuple_Check(exc_type))) {
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        } else {
        }
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Two-class variant generated for `except (A, B)`-style clauses. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
    }
    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif

/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* op1 + intval, specialised for exact int/long/float op1; falls back to the
   generic number protocol otherwise.  Head only — continues in next chunk. */
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
    (void)inplace;
    (void)zerodivision_check;
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b =
/* (interior of __Pyx_PyInt_AddObjC — the signature lies in the previous chunk)
   Py2 plain-int fast path: add with a portable signed-overflow check done
   in unsigned arithmetic. */
intval;
        long x;
        long a = PyInt_AS_LONG(op1);
            x = (long)((unsigned long)a + b);
            if (likely((x^a) >= 0 || (x^b) >= 0))
                return PyInt_FromLong(x);
            return PyLong_Type.tp_as_number->nb_add(op1, op2);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        /* Read up to four PyLong digits directly (sign carried by Py_SIZE);
           escalate to long long, then to the generic nb_add, as the
           magnitude grows beyond what fits. */
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
            }
        }
        x = a + b;
        return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
    long_long:
        llx = lla + llb;
        return PyLong_FromLongLong(llx);
#endif
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
        double result;
        /* FP-exception guard; a no-op on most platforms. */
        PyFPE_START_PROTECT("add", return NULL)
        result = ((double)a) + (double)b;
        PyFPE_END_PROTECT(result)
/* (tail of the __Pyx_PyInt_AddObjC float fast path) */
        return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif

/* None */
/* Raise UnboundLocalError for a local variable read before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}

/* WriteUnraisableException */
/* Report an exception that cannot propagate (e.g. raised where no Python
   frame can receive it): logs via PyErr_WriteUnraisable, optionally after
   printing the full traceback, acquiring the GIL first when nogil is set. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback, CYTHON_UNUSED int nogil) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_PyThreadState_declare
#ifdef WITH_THREAD
    PyGILState_STATE state;
    if (nogil)
        state = PyGILState_Ensure();
#ifdef _MSC_VER
    /* keep MSVC's maybe-uninitialized analysis quiet */
    else state = (PyGILState_STATE)-1;
#endif
#endif
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* Extra refs so we can both print and re-report the exception. */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
    #if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
    #else
    ctx = PyUnicode_FromString(name);
    #endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
#ifdef WITH_THREAD
    if (nogil)
        PyGILState_Release(state);
#endif
}

/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Format the standard AttributeError for a missing attribute. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
/* Generic attribute lookup fast path for types WITHOUT an instance dict.
   Head only — the body continues in the next chunk. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
/* (tail of __Pyx_PyObject_GenericGetAttrNoDict) */
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
    #if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
    #endif
    {
        /* Descriptor with __get__: delegate; else return the class attr. */
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif

/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Use the no-dict fast path only for types without an instance dict. */
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif

/* SetVTable */
/* Store the extension type's C vtable pointer in the type dict, wrapped in
   a capsule (or a CObject on pre-2.7 Pythons). */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}

/* SetupReduce */
/* Does meth's __name__ equal name?  Returns 0 on mismatch or error. */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;
    name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }
    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }
    Py_XDECREF(name_attr);
    return ret;
}
/* Install the generated __reduce_cython__/__setstate_cython__ methods as
   __reduce__/__setstate__ so instances of the extension type pickle
   correctly.  The tail of this function lies in the next chunk. */
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    /* A user-defined __getstate__ wins: leave the type untouched. */
    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD;
#else
    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex);
    if (!object_reduce_ex) goto BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex);
    if (!object_reduce_ex) goto BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex);
    if (unlikely(!reduce_ex)) goto BAD;
    /* Only patch when the type still uses object's default pickling. */
    if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce);
        if (!object_reduce) goto BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce);
        if (!object_reduce) goto BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce);
        if (unlikely(!reduce)) goto BAD;
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
            reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython);
            if (unlikely(!reduce_cython)) goto BAD;
            ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython);
            if (unlikely(ret < 0)) goto BAD;
            ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython);
            if (unlikely(ret < 0)) goto BAD;
            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
                setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython);
                if (unlikely(!setstate_cython)) goto BAD;
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython);
                if (unlikely(ret < 0)) goto BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython);
                if (unlikely(ret < 0)) goto BAD;
            }
            /* Invalidate method caches after mutating tp_dict directly. */
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto GOOD;
BAD:
    if (!PyErr_Occurred())
        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
    ret = -1;
/* (tail of __Pyx_setup_reduce — cleanup of all temporaries) */
GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}

/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import class_name from module and verify it is a type whose tp_basicsize
   is compatible with the size this module was compiled against, according
   to the check_size policy (silent / error on mismatch / warn if larger). */
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
    size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
    PyObject *result = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    result = PyObject_GetAttrString(module, class_name);
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#ifndef Py_LIMITED_API
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    /* Limited API: read __basicsize__ through the attribute protocol. */
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    /* Smaller than compiled-against size is always a hard error. */
    if ((size_t)basicsize < size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        goto bad;
    }
    if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        goto bad;
    }
    else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(result);
    return NULL;
}
#endif

/* CalculateMetaclass */
/* Pick the most derived metaclass among `metaclass` and the bases' types;
   raise TypeError when two candidates are not ordered by subtyping. */
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases)
{
    Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases);
    for (i=0; i < nbases; i++) {
        PyTypeObject *tmptype;
        PyObject *tmp = PyTuple_GET_ITEM(bases, i);
        tmptype = Py_TYPE(tmp);
#if PY_MAJOR_VERSION < 3
        /* Old-style Py2 classes do not take part in metaclass resolution. */
        if (tmptype == &PyClass_Type)
            continue;
#endif
        if (!metaclass) {
            metaclass = tmptype;
            continue;
        }
        if (PyType_IsSubtype(metaclass, tmptype))
            continue;
        if (PyType_IsSubtype(tmptype, metaclass)) {
            metaclass = tmptype;
            continue;
        }
        PyErr_SetString(PyExc_TypeError,
                        "metaclass conflict: "
                        "the metaclass of a derived class "
                        "must be a (non-strict) subclass "
                        "of the metaclasses of all its bases");
        return NULL;
    }
    if (!metaclass) {
#if PY_MAJOR_VERSION < 3
        metaclass = &PyClass_Type;
#else
        metaclass = &PyType_Type;
#endif
    }
    Py_INCREF((PyObject*) metaclass);
    return (PyObject*) metaclass;
}

/* Py3ClassCreate */
/* Build the class namespace via metaclass.__prepare__ (falling back to a
   plain dict) and seed __module__/__qualname__/__doc__.
   The tail continues in the next chunk. */
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name,
                                           PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) {
    PyObject *ns;
    if (metaclass) {
        PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare);
        if (prep) {
            PyObject *pargs = PyTuple_Pack(2, name, bases);
            if (unlikely(!pargs)) {
                Py_DECREF(prep);
                return NULL;
            }
            ns = PyObject_Call(prep, pargs, mkw);
            Py_DECREF(prep);
            Py_DECREF(pargs);
        } else {
            /* No __prepare__ is fine; any other error propagates. */
            if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError)))
                return NULL;
            PyErr_Clear();
            ns = PyDict_New();
        }
    } else {
        ns = PyDict_New();
    }
    if (unlikely(!ns))
        return NULL;
    if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad;
    if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad;
    if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc)
/* (tail of __Pyx_Py3MetaclassPrepare) */
                 < 0)) goto bad;
    return ns;
bad:
    Py_DECREF(ns);
    return NULL;
}
/* Call the metaclass to create the class object.  Honours a Py2-style
   __metaclass__ entry in dict when allow_py2_metaclass is set, and can
   recompute the winning metaclass from the bases. */
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases,
                                      PyObject *dict, PyObject *mkw,
                                      int calculate_metaclass, int allow_py2_metaclass) {
    PyObject *result, *margs;
    PyObject *owned_metaclass = NULL;
    if (allow_py2_metaclass) {
        owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass);
        if (owned_metaclass) {
            metaclass = owned_metaclass;
        } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) {
            /* Missing __metaclass__ key is expected. */
            PyErr_Clear();
        } else {
            return NULL;
        }
    }
    if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) {
        metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
        Py_XDECREF(owned_metaclass);
        if (unlikely(!metaclass))
            return NULL;
        owned_metaclass = metaclass;
    }
    margs = PyTuple_Pack(3, name, bases, dict);
    if (unlikely(!margs)) {
        result = NULL;
    } else {
        result = PyObject_Call(metaclass, margs, mkw);
        Py_DECREF(margs);
    }
    Py_XDECREF(owned_metaclass);
    return result;
}

/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
/* Decide whether C source line numbers appear in tracebacks, controlled by
   cython_runtime.cline_in_traceback.  Head only — continues in next chunk. */
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        return c_line;
    }
    /* Preserve any live exception around the attribute lookups below. */
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        use_cline = PyObject_Not(use_cline_obj) ?
/* (tail of __Pyx_CLineForTraceback) */
                                                  Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        PyErr_Clear();
        use_cline = NULL;
      }
    }
    if (!use_cline) {
        c_line = 0;
        /* Cache the default so later lookups take the fast dict path. */
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache */
/* Binary search: index of code_line in entries, or its insertion point. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
             start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Return a new reference to the cached code object for code_line, or NULL. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cache entry for code_line, growing the sorted
   array in 64-entry steps.  Allocation failures are silently ignored —
   the cache is purely an optimisation. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail right to keep the array sorted by code_line. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}

/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a dummy code object whose filename/name/line point at the original
   source location (C file and line when c_line is non-zero).
   The tail continues in the next chunk. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
    #else
    py_srcfile = PyUnicode_FromString(filename);
    #endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,
/* (tail of __Pyx_CreateCodeObjectForTraceback — remaining PyCode_New args) */
        /*PyObject *filename,*/
        py_funcname,
        /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes
        /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a frame for (funcname, py_line) to the current traceback, caching
   synthesized code objects (C lines are cached under their negated value
   so they cannot collide with Python line numbers). */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,   /*PyThreadState *tstate,*/
        py_code,  /*PyCodeObject *code,*/
        __pyx_d,  /*PyObject *globals,*/
        0         /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Py2 buffer-protocol shim: route non-buffer objects to the generated
   numpy-ndarray / array / memoryview __getbuffer__ implementations. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
        if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
        if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
        if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
/* Release a buffer acquired via __Pyx_GetBuffer; clears view->obj. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}
        else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
/* Return 1 iff the slice is contiguous in the given order ('F' = Fortran,
 * anything else = C order): walks the dimensions innermost-first for that
 * order and checks that each stride equals the running element size and
 * that no dimension is indirect (suboffset >= 0). */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;
    if (order == 'F') {
        /* Fortran order: first dimension varies fastest. */
        step = 1;
        start = 0;
    } else {
        /* C order: last dimension varies fastest; iterate backwards. */
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        /* Expected stride of the next (slower) dimension grows by this extent. */
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
/* Compute the [start, end) byte range touched by a strided slice.
 * Negative strides move `start` down; positive strides move `end` up.
 * A zero-extent dimension yields an empty range (start == end). */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    /* `end` currently points at the first byte of the last element. */
    *out_end = end + itemsize;
}
/* Return nonzero iff the byte extents of the two slices intersect. */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    /* Classic half-open interval intersection test. */
    return (start1 < end2) && (start2 < end1);
}

/* Capsule */
/* Wrap a raw pointer in a PyCapsule (or PyCObject on pre-2.7). Returns a
 * new reference, or NULL on failure. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* CIntToPy */
/* Convert a C int to a Python integer, picking the smallest CPython
 * constructor that can represent it; falls back to _PyLong_FromByteArray.
 * The sizeof comparisons are compile-time constants, so the dead branches
 * fold away. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    /* Portable signedness probe: wraps to > 0 only for unsigned types. */
    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        }
else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = 
&ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    /* Parse a parenthesized array-dimension list "(d1,d2,...)" from a
     * PEP-3118 format string, starting with *tsp at the '('.  Each parsed
     * dimension is checked against the expected dtype arraysize.  On
     * success, advances *tsp past the ')' and returns a borrowed Py_None
     * (used only as a non-NULL success flag by the caller); on failure,
     * sets a Python exception and returns NULL. */
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip '(' */
    if (ctx->new_count != 1) {
        /* A repeat count directly before '(' (e.g. "2(3)") is unsupported. */
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* Flush any pending scalar run before entering the array. */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUGFIX: the original fell through to `continue` without
             * advancing `ts`, so whitespace inside the dimension list
             * (e.g. "(2, 3)") looped forever.  Skip the character first. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;
                continue;
            default:
                break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            /* PyErr_Format returns NULL, so this both raises and fails. */
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* step past ')' */
    return Py_None;
}
/* Validate a complete PEP-3118 format string `ts` against the expected
 * dtype described by `ctx`.  Returns the pointer past the consumed portion
 * on success, NULL (with an exception set) on mismatch.
 * NOTE: only the first part of this function lives on this line; it
 * continues below. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts)
{
    int got_Z = 0;  /* saw a 'Z' (complex) prefix for the next type char */
    while (1) {
        switch(*ts) {
        case 0:
            /* End of string: everything pending must have been consumed. */
            if (ctx->enc_type != 0 && ctx->head == NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            if (ctx->head != NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            return ts;
        case ' ':
        case '\r':
        case '\n':
            ++ts;
            break;
        case '<':
            /* Explicit little-endian: only valid on little-endian hosts. */
            if (!__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError,
                                "Little-endian buffer not supported on big-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '>':
        case '!':
            /* Explicit big-endian / network order: only on big-endian hosts. */
            if (__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError,
                                "Big-endian buffer not supported on little-endian compiler");
                return NULL;
            }
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim 
!= ndim - 1) {
            /* No strides info: a C-contig spec is only legal on the last dim. */
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Check one dimension's suboffset against the access spec:
 * DIRECT access forbids an indirect dimension (suboffset >= 0),
 * PTR access requires one.  Returns 1 on success, 0 + exception set on
 * failure. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    if (spec & __Pyx_MEMVIEW_DIRECT) {
        if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    if (spec & __Pyx_MEMVIEW_PTR) {
        if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Verify full C- or Fortran-contiguity of a buffer by recomputing the
 * expected stride of each dimension (innermost-first for the requested
 * order) and comparing it to the actual stride.  Dimensions of extent <= 1
 * are exempt since their stride is irrelevant. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int i;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        Py_ssize_t stride = 1;  /* expected stride, in elements */
        for (i = 0; i < ndim; i++) {
            if (stride * buf->itemsize != buf->strides[i] &&
                    buf->shape[i] > 1) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        Py_ssize_t stride = 1;
        /* `i >- 1` is generated-code spelling of `i > -1`. */
        for (i = ndim - 1; i >- 1; i--) {
            if (stride * buf->itemsize != buf->strides[i] &&
                    buf->shape[i] > 1) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    }
    return 1;
fail:
    return 0;
}
/* Acquire a Py_buffer from `original_obj` (reusing it directly when it is
 * already a matching memoryview), validate ndim / format / per-dimension
 * specs / contiguity, and initialize `memviewslice` from it.
 * Returns 0 on success, -1 on failure with an exception set.
 * NOTE: the function body continues on the following lines. */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int
from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : "");
        goto fail;
    }
    /* Per-dimension checks: strides and suboffsets against the axes specs. */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    /* Whole-buffer contiguity check (only meaningful when strides exist). */
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    /* Drop the memoryview we created (if any); a reused one is untouched. */
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}

/* ObjectToMemviewSlice */
/* Convert a Python object to a 2-D strided `double[:, :]` memoryview
 * slice.  Py_None passes through (result.memview == Py_None); on error the
 * returned slice has memview == data == NULL and an exception is set. */
static CYTHON_INLINE __Pyx_memviewslice
__Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    /* Both axes: direct (non-indirect) + strided access. */
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED)
    };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(
        axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2,
        &__Pyx_TypeInfo_double, stack, &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* MemviewDtypeToObject */
/* Box a raw `int` memoryview element as a Python int. */
static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp) {
    return (PyObject *) __Pyx_PyInt_From_int(*(int *) itemp);
}
/* Unbox a Python object into a raw `int` memoryview element.
 * Returns 1 on success, 0 on conversion failure (exception set). */
static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj) {
    int value = __Pyx_PyInt_As_int(obj);
    /* -1 is both a valid value and the error sentinel: disambiguate. */
    if ((value == (int)-1) && PyErr_Occurred())
        return 0;
    *(int *) itemp = value;
    return 1;
}

/* ObjectToMemviewSlice */
/* Convert a Python object to a 1-D strided `int[:]` memoryview slice;
 * same None/error conventions as the 2-D double converter above.
 * NOTE: the function body continues on the following line. */
static CYTHON_INLINE __Pyx_memviewslice
__Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct
__pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static 
CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = (float)(1.0) / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = (float)(1.0) / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int 
__Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0.0, -1.0); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = (double)(1.0) / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = (double)(1.0) / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * 
b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0.0, -1.0); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static 
CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
    /* Convert a C 'enum NPY_TYPES' to a Python int.  Signedness of the enum's
       underlying type is detected at compile time via the 0-1 wraparound test,
       then the smallest fitting CPython constructor is chosen; if none fits,
       fall back to building the number from raw bytes. */
    const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Last resort: hand the raw little/big-endian bytes to CPython. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
                                     little, !is_unsigned);
    }
}

/* MemviewSliceCopyTemplate */
/* Allocate a new contiguous array of the given shape/mode and (in the
   continuation of this function) copy the contents of *from_mvs into it.
   Indirect (suboffset) dimensions cannot be copied and raise ValueError. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    /* Reject slices with indirect dimensions up front. */
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Cannot copy memoryview slice with "
                         "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build the Python shape tuple used to allocate the destination array. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim;
i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } 
break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: 
if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; 
#if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = 
PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * 
sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if 
(likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, 
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != 
rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
/* GB_unop__identity_uint64_int32.c */
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_int32)
// op(A') function:  GB (_unop_tran__identity_uint64_int32)

// C type:   uint64_t
// A type:   int32_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise, parallel over the anz entries of A: Cx [p] = (uint64_t) Ax [p].
// In the bitmap case, entries with Ab [p] == 0 are skipped (not present).
GrB_Info GB (_unop_apply__identity_uint64_int32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body provided by the shared template GB_unop_transpose.c, specialized via
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint64_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matmul.par2d.c
#include <math.h> #include <omp.h> #define ceild(n, d) ceil(((double)(n)) / ((double)(d))) #define floord(n, d) floor(((double)(n)) / ((double)(d))) #define max(x, y) ((x) > (y) ? (x) : (y)) #define min(x, y) ((x) < (y) ? (x) : (y)) #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #define M 2048 #define N 2048 #define K 2048 #define alpha 1 #define beta 1 double A[M][K + 13]; double B[K][N + 13]; double C[M][N + 13]; #ifdef PERFCTR #include "papi_defs.h" #include <papi.h> #endif #include <sys/time.h> #include <unistd.h> #ifdef TIME #define IF_TIME(foo) foo; #else #define IF_TIME(foo) #endif void init_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = (i + j); B[i][j] = (double)(i * j); C[i][j] = 0.0; } } } void print_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { fprintf(stderr, "%lf ", C[i][j]); if (j % 80 == 79) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } } double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } double t_start, t_end; int main() { int i, j, k; register double s; init_array(); #ifdef PERFCTR PERF_INIT; #endif IF_TIME(t_start = rtclock()); int t1, t2, t3, t4, t5, t6; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; omp_set_nested(1); omp_set_num_threads(2); /* Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in 0.01s. 
*/ if ((M >= 1) && (N >= 1) && (K >= 1)) { lb1 = 0; ub1 = floord(M - 1, 32); #pragma omp parallel for shared(lb1, ub1) private(lb2, ub2, t1, t2, t3, t4, \ t5, t6) for (t1 = lb1; t1 <= ub1; t1++) { lb2 = 0; ub2 = floord(N - 1, 32); #pragma omp parallel for shared(t1, lb1, ub1, lb2, ub2) private(t2, t3, t4, \ t5, t6) for (t2 = lb2; t2 <= ub2; t2++) { for (t3 = 0; t3 <= floord(K - 1, 32); t3++) { for (t4 = max(0, 32 * t1); t4 <= min(M - 1, 32 * t1 + 31); t4++) { for (t5 = max(0, 32 * t3); t5 <= min(K - 1, 32 * t3 + 31); t5++) { { lbv = max(0, 32 * t2); ubv = min(N - 1, 32 * t2 + 31); #pragma ivdep #pragma vector always for (t6 = lbv; t6 <= ubv; t6++) { C[t4][t6] = C[t4][t6] + A[t4][t5] * B[t5][t6]; ; } } } } } } } } /* End of CLooG code */ IF_TIME(t_end = rtclock()); IF_TIME(printf("%0.6lfs\n", t_end - t_start)); #ifdef PERFCTR PERF_EXIT; #endif #ifdef TEST print_array(); #endif return 0; }
linop.h
#pragma once #include <Eigen/Dense> #include "chol.h" #include "bpmfutils.h" extern "C" { #include <csr.h> } class SparseFeat { public: BinaryCSR M; BinaryCSR Mt; SparseFeat() {} SparseFeat(int nrow, int ncol, long nnz, int* rows, int* cols) { new_bcsr(&M, nnz, nrow, ncol, rows, cols); new_bcsr(&Mt, nnz, ncol, nrow, cols, rows); } virtual ~SparseFeat() { free_bcsr( & M); free_bcsr( & Mt); } int nfeat() {return M.ncol;} int cols() {return M.ncol;} int nsamples() {return M.nrow;} int rows() {return M.nrow;} }; class SparseDoubleFeat { public: CSR M; CSR Mt; SparseDoubleFeat() {} SparseDoubleFeat(int nrow, int ncol, long nnz, int* rows, int* cols, double* vals) { new_csr(&M, nnz, nrow, ncol, rows, cols, vals); new_csr(&Mt, nnz, ncol, nrow, cols, rows, vals); } virtual ~SparseDoubleFeat() { free_csr( & M); free_csr( & Mt); } int nfeat() {return M.ncol;} int cols() {return M.ncol;} int nsamples() {return M.nrow;} int rows() {return M.nrow;} }; template<typename T> void solve_blockcg(Eigen::MatrixXd & X, T & t, double reg, Eigen::MatrixXd & B, double tol, const int blocksize, const int excess); template<typename T> int solve_blockcg(Eigen::MatrixXd & X, T & t, double reg, Eigen::MatrixXd & B, double tol); void At_mul_A(Eigen::MatrixXd & out, SparseFeat & A); void At_mul_A(Eigen::MatrixXd & out, SparseDoubleFeat & A); void At_mul_A(Eigen::MatrixXd & out, Eigen::MatrixXd & A); Eigen::VectorXd col_square_sum(SparseFeat & A); Eigen::VectorXd col_square_sum(SparseDoubleFeat & A); template<typename T> void compute_uhat(Eigen::MatrixXd & uhat, T & feat, Eigen::MatrixXd & beta); template<typename T> void AtA_mul_B(Eigen::MatrixXd & out, T & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & tmp); // compile-time optimized versions (N - number of RHSs) template<typename T> inline void AtA_mul_B_switch(Eigen::MatrixXd & out, T & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & tmp); inline void AtA_mul_B_switch(Eigen::MatrixXd & out, Eigen::MatrixXd & A, double reg, 
Eigen::MatrixXd & B, Eigen::MatrixXd & tmp); template<int N> void AtA_mul_Bx(Eigen::MatrixXd & out, SparseFeat & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & tmp); template<int N> void AtA_mul_Bx(Eigen::MatrixXd & out, SparseDoubleFeat & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & tmp); template<int N> void A_mul_Bx(Eigen::MatrixXd & out, BinaryCSR & A, Eigen::MatrixXd & B); template<int N> void A_mul_Bx(Eigen::MatrixXd & out, CSR & A, Eigen::MatrixXd & B); void At_mul_B_blas(Eigen::MatrixXd & Y, Eigen::MatrixXd & A, Eigen::MatrixXd & B); void At_mul_A_blas(Eigen::MatrixXd & A, double* AtA); void A_mul_At_blas(Eigen::MatrixXd & A, double* AAt); void A_mul_B_blas(Eigen::MatrixXd & Y, Eigen::MatrixXd & A, Eigen::MatrixXd & B); void A_mul_Bt_blas(Eigen::MatrixXd & Y, Eigen::MatrixXd & A, Eigen::MatrixXd & B); //template<int N> //inline void A_mul_B_omp(double alpha, Eigen::MatrixXd & out, double beta, Eigen::Matrix<double, N, N> & A, Eigen::MatrixXd & B); inline void A_mul_B_omp(double alpha, Eigen::MatrixXd & out, double beta, Eigen::MatrixXd & A, Eigen::MatrixXd & B); /* template<int N> inline void A_mul_B_add_omp(Eigen::MatrixXd & out, Eigen::Matrix<double, N, N> & A, Eigen::MatrixXd & B); template<int N> inline void A_mul_B_sub_omp(Eigen::MatrixXd & out, Eigen::Matrix<double, N, N> & A, Eigen::MatrixXd & B); */ void A_mul_At_combo(Eigen::MatrixXd & out, Eigen::MatrixXd & A); void A_mul_At_omp(Eigen::MatrixXd & out, Eigen::MatrixXd & A); Eigen::MatrixXd A_mul_At_combo(Eigen::MatrixXd & A); void A_mul_Bt_omp_sym(Eigen::MatrixXd & out, Eigen::MatrixXd & A, Eigen::MatrixXd & B); // util functions: void A_mul_B( Eigen::VectorXd & out, BinaryCSR & csr, Eigen::VectorXd & b); void A_mul_Bt( Eigen::MatrixXd & out, BinaryCSR & csr, Eigen::MatrixXd & B); void A_mul_B( Eigen::VectorXd & out, CSR & csr, Eigen::VectorXd & b); void A_mul_Bt( Eigen::MatrixXd & out, CSR & csr, Eigen::MatrixXd & B); void makeSymmetric(Eigen::MatrixXd & A); // Y = beta * Y + alpha 
* A * B (where B is symmetric)
void Asym_mul_B_left(double beta, Eigen::MatrixXd & Y, double alpha, Eigen::MatrixXd & A, Eigen::MatrixXd & B);
void Asym_mul_B_right(double beta, Eigen::MatrixXd & Y, double alpha, Eigen::MatrixXd & A, Eigen::MatrixXd & B);

// Y = X[:,col]' * B'
// Accumulates the rows of B selected by column `col` of X (binary values,
// so the nonzeros contribute 1 each).  Y must be sized D = B.rows().
inline void At_mul_Bt(Eigen::VectorXd & Y, SparseFeat & X, const int col, Eigen::MatrixXd & B) {
  const int* cols = X.Mt.cols;
  const int end = X.Mt.row_ptr[col + 1];
  const int D = Y.size();
  Y.setZero();
  for (int i = X.Mt.row_ptr[col]; i < end; i++) {
    int c = cols[i];
    for (int d = 0; d < D; d++) {
      Y(d) += B(d, c);
    }
  }
}

// Y = X[:,col]' * B'
// Same as above, but weighted by the stored values of X.
inline void At_mul_Bt(Eigen::VectorXd & Y, SparseDoubleFeat & X, const int col, Eigen::MatrixXd & B) {
  const int* cols = X.Mt.cols;
  const double* vals = X.Mt.vals;
  const int end = X.Mt.row_ptr[col + 1];
  const int D = Y.size();
  Y.setZero();
  for (int i = X.Mt.row_ptr[col]; i < end; i++) {
    int c = cols[i];
    double v = vals[i];
    for (int d = 0; d < D; d++) {
      Y(d) += v * B(d, c);
    }
  }
}

// computes Z += A[:,col] * b', where a and b are vectors
// (rank-1 update restricted to the rows where column `col` of A is nonzero)
inline void add_Acol_mul_bt(Eigen::MatrixXd & Z, SparseFeat & A, const int col, Eigen::VectorXd & b) {
  const int* cols = A.Mt.cols;
  int i = A.Mt.row_ptr[col];
  const int end = A.Mt.row_ptr[col + 1];
  const int D = b.size();
  for (; i < end; i++) {
    int c = cols[i];
    for (int d = 0; d < D; d++) {
      Z(d, c) += b(d);
    }
  }
}

// computes Z += A[:,col] * b', where a and b are vectors
// (value-weighted variant of the update above)
inline void add_Acol_mul_bt(Eigen::MatrixXd & Z, SparseDoubleFeat & A, const int col, Eigen::VectorXd & b) {
  const int* cols = A.Mt.cols;
  const double* vals = A.Mt.vals;
  const int D = b.size();
  int i = A.Mt.row_ptr[col];
  const int end = A.Mt.row_ptr[col + 1];
  for (; i < end; i++) {
    int c = cols[i];
    double v = vals[i];
    for (int d = 0; d < D; d++) {
      Z(d, c) += v * b(d);
    }
  }
}

///////////////////////////////////
//     Template functions
///////////////////////////////////

//// for Sparse
/**
 * uhat       - [D x N] dense matrix
 * sparseFeat - [N x F] sparse matrix (features)
 * beta       - [D x F] dense matrix
 * computes:
 *   uhat = beta * sparseFeat'
 */
template<> inline void compute_uhat(Eigen::MatrixXd & uhat, SparseFeat & sparseFeat, Eigen::MatrixXd & beta) {
  A_mul_Bt(uhat, sparseFeat.M, beta);
}

template<> inline void compute_uhat(Eigen::MatrixXd & uhat, SparseDoubleFeat & feat, Eigen::MatrixXd & beta) {
  A_mul_Bt(uhat, feat.M, beta);
}

/** computes uhat = denseFeat * beta, where beta and uhat are row ordered */
template<> inline void compute_uhat(Eigen::MatrixXd & uhat, Eigen::MatrixXd & denseFeat, Eigen::MatrixXd & beta) {
  A_mul_Bt_blas(uhat, beta, denseFeat);
}

/** good values for solve_blockcg are blocksize=32 an excess=8
 *  Splits the right-hand sides (rows of B) into chunks of `blocksize`
 *  (the final chunk absorbing up to `excess` extra rows) and solves each
 *  chunk with the plain solve_blockcg below. */
template<typename T>
inline void solve_blockcg(Eigen::MatrixXd & X, T & K, double reg, Eigen::MatrixXd & B, double tol, const int blocksize, const int excess) {
  if (B.rows() <= excess + blocksize) {
    solve_blockcg(X, K, reg, B, tol);
    return;
  }
  // split B into blocks of size <blocksize> (+ excess if needed)
  Eigen::MatrixXd Xblock, Bblock;
  for (int i = 0; i < B.rows(); i += blocksize) {
    int nrows = blocksize;
    if (i + blocksize + excess >= B.rows()) {
      nrows = B.rows() - i;
    }

    Bblock.resize(nrows, B.cols());
    Xblock.resize(nrows, X.cols());

    Bblock = B.block(i, 0, nrows, B.cols());
    solve_blockcg(Xblock, K, reg, Bblock, tol);
    X.block(i, 0, nrows, X.cols()) = Xblock;
  }
}

/** Block conjugate-gradient solve of (K'K + reg*I) x = b for every
 *  right-hand side stored as a row of B (X and B are [nrhs x nfeat],
 *  i.e. transposed).  Each RHS is normalized first and rescaled at the
 *  end; convergence is per-RHS residual norm < tol.  Returns the number
 *  of CG iterations performed. */
template<typename T>
inline int solve_blockcg(Eigen::MatrixXd & X, T & K, double reg, Eigen::MatrixXd & B, double tol) {
  // initialize
  const int nfeat = B.cols();
  const int nrhs  = B.rows();
  double tolsq = tol*tol;

  if (nfeat != K.cols()) {throw std::runtime_error("B.cols() must equal K.cols()");}

  // per-RHS norms of B, used to normalize the system and un-normalize X
  Eigen::VectorXd norms(nrhs), inorms(nrhs);
  norms.setZero();
  inorms.setZero();
#pragma omp parallel for schedule(static)
  for (int rhs = 0; rhs < nrhs; rhs++) {
    double sumsq = 0.0;
    for (int feat = 0; feat < nfeat; feat++) {
      sumsq += B(rhs, feat) * B(rhs, feat);
    }
    norms(rhs)  = sqrt(sumsq);
    inorms(rhs) = 1.0 / norms(rhs);
  }
  Eigen::MatrixXd R(nrhs, nfeat);     // residuals (one per row)
  Eigen::MatrixXd P(nrhs, nfeat);     // search directions
  Eigen::MatrixXd Ptmp(nrhs, nfeat);  // NOTE(review): appears unused
  X.setZero();
  // normalize R and P:
#pragma omp parallel for schedule(static) collapse(2)
  for (int feat = 0; feat < nfeat; feat++) {
    for (int rhs = 0; rhs < nrhs; rhs++) {
      R(rhs, feat) = B(rhs, feat) * inorms(rhs);
      P(rhs, feat) = R(rhs, feat);
    }
  }
  // heap-allocated so RtR/RtR2 can be swapped by pointer each iteration
  Eigen::MatrixXd* RtR  = new Eigen::MatrixXd(nrhs, nrhs);
  Eigen::MatrixXd* RtR2 = new Eigen::MatrixXd(nrhs, nrhs);

  Eigen::MatrixXd KP(nrhs, nfeat);
  Eigen::MatrixXd KPtmp(nrhs, K.rows());
  Eigen::MatrixXd PtKP(nrhs, nrhs);
  //Eigen::Matrix<double, N, N> A;
  //Eigen::Matrix<double, N, N> Psi;
  Eigen::MatrixXd A;
  Eigen::MatrixXd Psi;

  A_mul_At_combo(*RtR, R);
  makeSymmetric(*RtR);

  // column blocks of width 64 for parallel updates of X, R and P
  const int nblocks = (int)ceil(nfeat / 64.0);

  // CG iteration:
  int iter = 0;
  for (iter = 0; iter < 100000; iter++) {
    // KP = K * P
    ////double t1 = tick();
    AtA_mul_B_switch(KP, K, reg, P, KPtmp);
    ////double t2 = tick();

    //A_mul_Bt_blas(PtKP, P, KP); // TODO: use KPtmp with dsyrk to save 2x time
    A_mul_Bt_omp_sym(PtKP, P, KP);
    // step sizes: A = (P' K P) \ (R R'), transposed for row-major updates
    Eigen::LLT<Eigen::MatrixXd> chol = PtKP.llt();
    A = chol.solve(*RtR);
    A.transposeInPlace();
    ////double t3 = tick();

#pragma omp parallel for schedule(dynamic, 4)
    for (int block = 0; block < nblocks; block++) {
      int col = block * 64;
      int bcols = std::min(64, nfeat - col);
      // X += A' * P
      X.block(0, col, nrhs, bcols).noalias() += A * P.block(0, col, nrhs, bcols);
      // R -= A' * KP
      R.block(0, col, nrhs, bcols).noalias() -= A * KP.block(0, col, nrhs, bcols);
    }
    ////double t4 = tick();

    // convergence check:
    A_mul_At_combo(*RtR2, R);
    makeSymmetric(*RtR2);

    Eigen::VectorXd d = RtR2->diagonal();
    //std::cout << "[ iter " << iter << "] " << d.cwiseSqrt() << "\n";
    if ( (d.array() < tolsq).all()) {
      break;
    }

    // Psi = (R R') \ R2 R2'
    chol = RtR->llt();
    Psi  = chol.solve(*RtR2);
    Psi.transposeInPlace();
    ////double t5 = tick();

    // P = R + Psi' * P (P and R are already transposed)
#pragma omp parallel for schedule(dynamic, 8)
    for (int block = 0; block < nblocks; block++) {
      int col = block * 64;
      int bcols = std::min(64, nfeat - col);
      Eigen::MatrixXd xtmp(nrhs, bcols);
      xtmp = Psi * P.block(0, col, nrhs, bcols);
      P.block(0, col, nrhs, bcols) = R.block(0, col, nrhs, bcols) + xtmp;
    }

    // R R' = R2 R2'
    std::swap(RtR, RtR2);
    ////double t6 = tick();
    ////printf("t2-t1 = %.3f, t3-t2 = %.3f, t4-t3 = %.3f, t5-t4 = %.3f, t6-t5 = %.3f\n", t2-t1, t3-t2, t4-t3, t5-t4, t6-t5);
  }
  // unnormalizing X:
#pragma omp parallel for schedule(static) collapse(2)
  for (int feat = 0; feat < nfeat; feat++) {
    for (int rhs = 0; rhs < nrhs; rhs++) {
      X(rhs, feat) *= norms(rhs);
    }
  }
  delete RtR;
  delete RtR2;
  return iter;
}

/** out = A * B for a binary CSR matrix A, with B and out stored
 *  column-per-sample (N = number of rows of B, fixed at compile time so the
 *  per-row accumulator fits in registers/stack). */
template<int N>
void A_mul_Bx(Eigen::MatrixXd & out, BinaryCSR & A, Eigen::MatrixXd & B) {
  assert(N == out.rows());
  assert(N == B.rows());
  assert(A.ncol == B.cols());
  assert(A.nrow == out.cols());
  int* row_ptr = A.row_ptr;
  int* cols    = A.cols;
  const int nrow = A.nrow;
  double* Y = out.data();
  double* X = B.data();
#pragma omp parallel for schedule(dynamic, 256)
  for (int row = 0; row < nrow; row++) {
    double tmp[N] = { 0 };
    const int end = row_ptr[row + 1];
    for (int i = row_ptr[row]; i < end; i++) {
      int col = cols[i] * N;        // column-major offset into B's data
      for (int j = 0; j < N; j++) {
        tmp[j] += X[col + j];
      }
    }
    int r = row * N;
    for (int j = 0; j < N; j++) {
      Y[r + j] = tmp[j];
    }
  }
}

/** out = A * B for a real-valued CSR matrix A (value-weighted variant). */
template<int N>
void A_mul_Bx(Eigen::MatrixXd & out, CSR & A, Eigen::MatrixXd & B) {
  assert(N == out.rows());
  assert(N == B.rows());
  assert(A.ncol == B.cols());
  assert(A.nrow == out.cols());
  int* row_ptr = A.row_ptr;
  int* cols    = A.cols;
  double* vals = A.vals;
  const int nrow = A.nrow;
  double* Y = out.data();
  double* X = B.data();
#pragma omp parallel for schedule(dynamic, 256)
  for (int row = 0; row < nrow; row++) {
    double tmp[N] = { 0 };
    const int end = row_ptr[row + 1];
    for (int i = row_ptr[row]; i < end; i++) {
      int col = cols[i] * N;
      double val = vals[i];
      for (int j = 0; j < N; j++) {
        tmp[j] += X[col + j] * val;
      }
    }
    int r = row * N;
    for (int j = 0; j < N; j++) {
      Y[r + j] = tmp[j];
    }
  }
}

/** Dispatch AtA_mul_Bx<N> on the runtime number of RHSs (N = B.rows()),
 *  so the hot kernels get a compile-time N. */
template<typename T>
inline void AtA_mul_B_switch(Eigen::MatrixXd & out, T & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & tmp) {
  switch(B.rows()) {
    case 1: return AtA_mul_Bx<1>(out, A, reg, B, tmp);
    case 2: return AtA_mul_Bx<2>(out, A, reg, B, tmp);
    case 3: return AtA_mul_Bx<3>(out, A, reg, B, tmp);
    case 4: return AtA_mul_Bx<4>(out, A, reg, B, tmp);
    case 5: return AtA_mul_Bx<5>(out, A, reg, B, tmp);
    case 6: return AtA_mul_Bx<6>(out, A, reg, B, tmp);
    case 7: return AtA_mul_Bx<7>(out, A, reg, B, tmp);
    case 8: return AtA_mul_Bx<8>(out, A, reg, B, tmp);
    case 9: return AtA_mul_Bx<9>(out, A, reg, B, tmp);
    case 10: return AtA_mul_Bx<10>(out, A, reg, B, tmp);
    case 11: return AtA_mul_Bx<11>(out, A, reg, B, tmp);
    case 12: return AtA_mul_Bx<12>(out, A, reg, B, tmp);
    case 13: return AtA_mul_Bx<13>(out, A, reg, B, tmp);
    case 14: return AtA_mul_Bx<14>(out, A, reg, B, tmp);
    case 15: return AtA_mul_Bx<15>(out, A, reg, B, tmp);
    case 16: return AtA_mul_Bx<16>(out, A, reg, B, tmp);
    case 17: return AtA_mul_Bx<17>(out, A, reg, B, tmp);
    case 18: return AtA_mul_Bx<18>(out, A, reg, B, tmp);
    case 19: return AtA_mul_Bx<19>(out, A, reg, B, tmp);
    case 20: return AtA_mul_Bx<20>(out, A, reg, B, tmp);
    case 21: return AtA_mul_Bx<21>(out, A, reg, B, tmp);
    case 22: return AtA_mul_Bx<22>(out, A, reg, B, tmp);
    case 23: return AtA_mul_Bx<23>(out, A, reg, B, tmp);
    case 24: return AtA_mul_Bx<24>(out, A, reg, B, tmp);
    case 25: return AtA_mul_Bx<25>(out, A, reg, B, tmp);
    case 26: return AtA_mul_Bx<26>(out, A, reg, B, tmp);
    case 27: return AtA_mul_Bx<27>(out, A, reg, B, tmp);
    case 28: return AtA_mul_Bx<28>(out, A, reg, B, tmp);
    case 29: return AtA_mul_Bx<29>(out, A, reg, B, tmp);
    case 30: return AtA_mul_Bx<30>(out, A, reg, B, tmp);
    case 31: return AtA_mul_Bx<31>(out, A, reg, B, tmp);
    case 32: return AtA_mul_Bx<32>(out, A, reg, B, tmp);
    case 33: return AtA_mul_Bx<33>(out, A, reg, B, tmp);
    case 34: return AtA_mul_Bx<34>(out, A, reg, B, tmp);
    case 35: return AtA_mul_Bx<35>(out, A, reg, B, tmp);
    case 36: return AtA_mul_Bx<36>(out, A, reg, B, tmp);
    case 37: return AtA_mul_Bx<37>(out, A, reg, B, tmp);
    case 38: return AtA_mul_Bx<38>(out, A, reg, B, tmp);
    case 39: return AtA_mul_Bx<39>(out, A, reg, B, tmp);
    case 40: return AtA_mul_Bx<40>(out, A, reg, B, tmp);
    default: throw std::runtime_error("BlockCG only available for up to 40 RHSs.");
  }
}

/** Dense overload: out = (A'A + reg*I) * B' (row-per-RHS convention).
 *  NOTE(review): tmp is accepted for signature parity but unused here. */
inline void AtA_mul_B_switch(
    Eigen::MatrixXd & out,
    Eigen::MatrixXd & A,
    double reg,
    Eigen::MatrixXd & B,
    Eigen::MatrixXd & tmp) {
  out.noalias() = (A.transpose() * (A * B.transpose())).transpose() + reg * B;
}

/** out = (A'A + reg*I) * B for binary sparse A, computed as A' * (A * B)
 *  with `inner` holding the intermediate A * B. */
template<int N>
void AtA_mul_Bx(Eigen::MatrixXd & out, SparseFeat & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & inner) {
  assert(N == out.rows());
  assert(N == B.rows());
  assert(A.cols() == B.cols());
  assert(A.cols() == out.cols());
  assert(A.rows() == inner.cols());
  A_mul_Bx<N>(inner, A.M, B);       // inner = A * B
  int* row_ptr = A.Mt.row_ptr;
  int* cols    = A.Mt.cols;
  const int nrow = A.Mt.nrow;
  double* Y = out.data();
  double* X = inner.data();
  double* Braw = B.data();
#pragma omp parallel for schedule(dynamic, 256)
  for (int row = 0; row < nrow; row++) {
    double tmp[N] = { 0 };
    const int end = row_ptr[row + 1];
    for (int i = row_ptr[row]; i < end; i++) {
      int col = cols[i] * N;
      for (int j = 0; j < N; j++) {
        tmp[j] += X[col + j];
      }
    }
    int r = row * N;
    for (int j = 0; j < N; j++) {
      Y[r + j] = tmp[j] + reg * Braw[r + j];   // add the ridge term
    }
  }
}

/** out = (A'A + reg*I) * B for real-valued sparse A (weighted variant). */
template<int N>
void AtA_mul_Bx(Eigen::MatrixXd & out, SparseDoubleFeat & A, double reg, Eigen::MatrixXd & B, Eigen::MatrixXd & inner) {
  assert(N == out.rows());
  assert(N == B.rows());
  assert(A.cols() == B.cols());
  assert(A.cols() == out.cols());
  assert(A.rows() == inner.cols());
  A_mul_Bx<N>(inner, A.M, B);       // inner = A * B
  int* row_ptr = A.Mt.row_ptr;
  int* cols    = A.Mt.cols;
  double* vals = A.Mt.vals;
  const int nrow = A.Mt.nrow;
  double* Y = out.data();
  double* X = inner.data();
  double* Braw = B.data();
#pragma omp parallel for schedule(dynamic, 256)
  for (int row = 0; row < nrow; row++) {
    double tmp[N] = { 0 };
    const int end = row_ptr[row + 1];
    for (int i = row_ptr[row]; i < end; i++) {
      int col = cols[i] * N;
      double val = vals[i];
      for (int j = 0; j < N; j++) {
        tmp[j] += X[col + j] * val;
      }
    }
    int r = row * N;
    for (int j = 0; j < N; j++) {
      Y[r + j] = tmp[j] + reg * Braw[r + j];   // add the ridge term
    }
  }
}

// computes out = alpha * out + beta * A * B
// parallelized over 64-column blocks of out.
// NOTE(review): B is sliced with out's row count (nrow), which only matches
// A * B when A is square -- confirm callers always pass square A.
inline void A_mul_B_omp(
    double alpha, Eigen::MatrixXd & out,
    double beta,  Eigen::MatrixXd & A,
    Eigen::MatrixXd & B)
{
  assert(out.cols() == B.cols());
  const int nblocks = (int)ceil(out.cols() / 64.0);
  const int nrow = out.rows();
  const int ncol = out.cols();
#pragma omp parallel for schedule(dynamic, 8)
  for (int block = 0; block < nblocks; block++) {
    int col = block * 64;
    int bcols = std::min(64, ncol - col);
    out.block(0, col, nrow, bcols).noalias() = alpha * out.block(0, col, nrow, bcols) + beta * A * B.block(0, col, nrow, bcols);
  }
}

/*
template<int N>
inline void A_mul_B_omp(
    double alpha, Eigen::MatrixXd & out,
    double beta, Eigen::Matrix<double, N, N> & A,
    Eigen::MatrixXd & B)
{
  assert(out.cols() == B.cols());
  const int nblocks = out.cols() / 64;
#pragma omp parallel for schedule(dynamic, 8)
  for (int block = 0; block < nblocks; block++) {
    int col = block * 64;
    out.template block<N, 64>(0, col).noalias() = alpha * out.template block<N, 64>(0, col) + beta * A * B.template block<N, 64>(0, col);
  }
  // last remaining block
  int col = nblocks * 64;
  out.block(0, col, N, out.cols() - col) = alpha * out.block(0, col, N, out.cols() - col) + beta * A * B.block(0, col, N, out.cols() - col);
}
*/
7.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <sys/time.h>   /* gettimeofday / struct timeval -- was missing */

#define N 1000000       /* number of array elements */
#define MAX_ITER 25     /* repetitions to average the timing over */

/*
 * Speed up the for loop in a program that applies an elementwise
 * function to the elements of array a:  a[i] = F(a[i]).
 * (Translated from the original Russian comment.)
 */

/* Elementwise function F(x) = x^3 - 0.15*x (0.15 is a double literal,
 * so the expression is evaluated in double and truncated to float). */
float func(float element){
    return element * element * element - element * 0.15;
}

int main() {
    srand(time(0));
    /* static storage: a 4 MB automatic array would risk overflowing the
     * default stack; values are fully overwritten before each use. */
    static float a[N];
    float full_time = 0.;
    struct timeval start_s, end_s;

    for (int iter = 0; iter < MAX_ITER; iter++){
        /* refill with uniform [0, 1] random values before each timed pass */
        for (int i = 0; i < N; i++){
            a[i] = (float)rand()/(float)RAND_MAX;
        }

        gettimeofday(&start_s, NULL);
        /* iterations are independent, so a plain parallel for suffices */
        #pragma omp parallel for
        for (int i = 0; i < N; i++){
            a[i] = func(a[i]);
        }
        gettimeofday(&end_s, NULL);

        full_time += ((end_s.tv_sec - start_s.tv_sec) * 1000000u
                      + end_s.tv_usec - start_s.tv_usec) / 1.e6;
    }

    /* report the mean wall-clock time of the parallel loop */
    printf("time: %f sec\n", full_time / MAX_ITER);
    return 0;
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices: % The vertex nearest the origin in RGB space and the vertex farthest from % the origin. % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/

/*
  A pixel held in floating point so quantization error can be accumulated
  without clipping or rounding between passes.
*/
typedef struct _RealPixelPacket
{
  MagickRealType
    red,
    green,
    blue,
    opacity;
} RealPixelPacket;

/*
  One node of the color description tree.  Each node covers a cube in RGB(A)
  space; child[] has 16 slots to allow 4 classification bits (RGBA) per level.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,            /* enclosing (larger) cube, NULL only for the root */
    *child[16];         /* sub-cubes; 8 used for RGB, 16 when alpha is on */

  MagickSizeType
    number_unique;      /* "n2": pixels that classify no deeper than here */

  RealPixelPacket
    total_color;        /* "Sr/Sg/Sb(/Sa)": sums for later mean color */

  MagickRealType
    quantize_error;     /* "E": accumulated distance to the cube center */

  size_t
    color_number,       /* colormap index assigned by DefineImageColormap() */
    id,                 /* which child slot of the parent this node fills */
    level;              /* tree depth, root is level 0 */
} NodeInfo;

/*
  Arena of NodeInfo structures; arenas are chained so all nodes can be
  released at once in DestroyCubeInfo().
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/*
  All state for one quantization run: the octree, pruning thresholds,
  closest-color search scratch, and dither state.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,             /* colors currently defined by the tree */
    maximum_colors;     /* requested colormap size */

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  RealPixelPacket
    target;             /* pixel being matched by ClosestColor() */

  MagickRealType
    distance,           /* best (squared) distance found so far */
    pruning_threshold,
    next_threshold;

  size_t
    nodes,              /* total nodes allocated */
    free_nodes,         /* nodes remaining in the current arena */
    color_number;       /* colormap index of the best match */

  NodeInfo
    *next_node;

  Nodes
    *node_queue;        /* chain of node arenas */

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;             /* pixel -> colormap-index cache used by dithering */

  RealPixelPacket
    error[ErrorQueueLength];

  MagickRealType
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;    /* classify on 4 channels instead of 3 */

  ssize_t
    x,
    y;

  size_t
    depth;              /* current classification depth (may shrink on prune) */

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *),
  SetGrayscaleImage(Image *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(const Image *,CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
% % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info)); if (quantize_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither=image_info->dither; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. 
The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Load *pixel from a PixelPacket, pre-multiplying red/green/blue by the
  normalized alpha when the cube classifies on alpha and the pixel is not
  fully opaque.  Opacity itself is copied through unscaled.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,RealPixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      /* No alpha association required: straight channel copy. */
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  /* alpha in [0,1]; note opacity is inverted alpha in this API. */
  alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=alpha*GetPixelRed(pixel);
  alpha_pixel->green=alpha*GetPixelGreen(pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}

/*
  Clamp a real value to the representable Quantum range; rounds to nearest
  in non-HDRI builds, passes the value through in HDRI builds.
*/
static inline Quantum ClampPixel(const MagickRealType value)
{
  if (value < 0.0f)
    return(0);
  if (value >= (MagickRealType) QuantumRange)
    return((Quantum) QuantumRange);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  return((Quantum) (value+0.5f));
#else
  return(value);
#endif
}

/*
  Extract the child slot (0..7, or 0..15 with alpha) for tree level `index`:
  one bit per channel taken from the 8-bit-scaled channel values, red in
  bit 0, green bit 1, blue bit 2, and opacity bit 3 when alpha is associated.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const RealPixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >>
    index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) & 0x01)
    << 1 |
    ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >> index) & 0x01)
    << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
      0x1) << 3;
  return(id);
}

/*
  True when two pixels compare equal on RGB, and also on opacity when the
  image carries a matte channel.  Used to run-length batch identical pixels.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if
((GetPixelRed(p) != GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.  The image is first moved into the quantization
    colorspace (or sRGB as a fallback) so classification below matches the
    space used by ClassifyImageColors().
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() rebuilds image->colors as it walks the tree. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /*
          Each thread works on a private copy of the cube so the
          closest-color scratch fields (target/distance/color_number) do not
          race; the tree itself is only read here.
        */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          RealPixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  A run
            of identical pixels (count) is classified once.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Sentinel strictly larger than any 4-channel squared distance. */
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      Quantum
        intensity;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Monochrome image: snap each colormap entry to pure black or white
        based on its intensity.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        intensity=(Quantum) (GetPixelIntensity(image,q) <
          ((MagickRealType) QuantumRange/2.0) ? 0 : QuantumRange);
        SetPixelRed(q,intensity);
        SetPixelGreen(q,intensity);
        SetPixelBlue(q,intensity);
        q++;
      }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax = 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.
At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether classification should include the alpha channel: only when
  the image has a matte and neither the transparent colorspace nor the
  2-color grayscale (monochrome) special case is in effect.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if (cube_info->quantize_info->colorspace == TransparentColorspace)
    associate_alpha=MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  RealPixelPacket
    error,
    mid,
    midpoint,
    pixel;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
    Once the budget is exceeded the tree is pruned to cube_info->depth and a
    second, shallower loop (below) classifies the remaining rows.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree; a run of
        identical pixels (count) is classified once.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* Track the center of the child cube as we descend. */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
              NOTE(review): if GetNodeInfo() fails, the exception is logged
              but node_info->child[id] stays NULL and is dereferenced just
              below -- TODO confirm against the upstream fix.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","`%s'",
                image->filename);
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=count*sqrt((double) (error.red*error.red+
          error.green*error.green+error.blue*error.blue+
          error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Budget exceeded: prune and fall through to the shallow loop. */
        PruneToCubeDepth(image,cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows at the (possibly reduced) cube depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","%s",
                image->filename);
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=count*sqrt((double) (error.red*error.red+
          error.green*error.green+error.blue*error.blue+
          error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  /* Copy only the user-settable fields; GetQuantizeInfo() set the rest. */
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither=quantize_info->dither;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color
%  (cube_info->target).  The best match so far is carried in
%  cube_info->distance / cube_info->color_number.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *restrict p;

      register RealPixelPacket
        *restrict q;

      /*
        Determine if this color is "closest".  The squared distance is
        accumulated channel by channel so a worse-than-best partial sum can
        short-circuit the remaining comparisons.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* Alpha-weighted comparison when transparency participates. */
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  pixel=alpha-beta;
                  distance+=pixel*pixel;
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the current number of colors at maximum tree depth; this
    collapses duplicate and unused colormap entries.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color (normalized sums) divided by the unique pixel count.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;

              /*
                Sums were alpha-premultiplied during classification; divide
                back out by the mean alpha (gamma) to recover straight color.
              */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /* Remember the most-populated transparent entry. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the chained node arenas, freeing
    each arena and its link, then the dither cache, the owned quantize_info,
    and finally the CubeInfo itself.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  /* Invalidate the signature so use-after-free is caught by the asserts. */
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Free the per-thread error-row buffers allocated by AcquirePixelThreadSet();
  tolerates partially-populated sets so it can serve as the failure path.
*/
static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (RealPixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (RealPixelPacket *) NULL)
      pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one buffer per worker thread, each holding 2*count pixels (a
  current and a previous error row for the serpentine scan).  Returns NULL
  on failure, releasing anything already allocated.
*/
static RealPixelPacket **AcquirePixelThreadSet(const size_t count)
{
  RealPixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (RealPixelPacket **) NULL)
    return((RealPixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* Two rows per thread: current and previous error rows. */
    pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (RealPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a pixel to its slot in the dither color cache by packing the top
  (8-CacheShift) bits of each 8-bit-scaled channel into one index.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const RealPixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  RealPixelPacket
    **pixels;
ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (RealPixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; RealPixelPacket *current, *previous; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { RealPixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FloydSteinbergDither) #endif proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) 
          RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      /*
        Recursive case: visit the four sub-quadrants of the Hilbert curve
        (level-1), emitting one dither step between each.  The rotation of
        sub-directions differs per entry direction.
      */
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}

/*
  RiemersmaDither() quantizes the single pixel at the cube's current
  (x,y) position, diffusing accumulated error from the weighted error
  queue, then advances the position one step in the given direction.
  Returns MagickFalse if the pixel cache fails or the progress monitor
  cancels; body continues below.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag  "Dither/Image"

  MagickBooleanType
    proceed;

  RealPixelPacket
    color,
    pixel;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  /* the Hilbert curve covers a power-of-two square that may extend past
     the image; only dither positions that land inside the image */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;

      register IndexPacket
        *restrict indexes;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Advance one step along the Hilbert curve.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

/*
  MagickMax()/MagickMin(): signed max/min helpers used by the dither and
  posterize methods below.
*/
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  if (x > y)
    return(x);
  return(y);
}

static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  DitherImage() dispatches to Floyd-Steinberg error diffusion unless the
  Riemersma method was requested, in which case it sizes a Hilbert curve
  large enough to cover the image and traverses it.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /* choose the smallest depth with 2^depth >= max(columns,rows) */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
    image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  /* flush the final pixel; ForgetGravity leaves the position unchanged */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* clamp requested tree depth to [2, MaxTreeDepth] */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot for every packed
    (red,green,blue,alpha) key CacheOffset() can produce.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache (-1 marks an empty slot).
  */
  for (i=0; i < (ssize_t) length; i++)
    cube_info->cache[i]=(-1);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors so they sum to exactly 1.0 (any
    floating-point residue is folded into the first weight).
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes (nodes are pooled in blocks of
        NodesInAList and freed all at once with the cube).
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t I m a g e Q u a n t i z e E r r o r                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /* only PseudoClass images have a colormap to compare against */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* three channels per pixel */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /* weight each side of the comparison by its alpha */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t Q u a n t i z e I n f o                                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  /* library defaults: 256 colors, Riemersma dithering enabled */
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  P o s t e r i z e I m a g e C h a n n e l                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const MagickBooleanType dither)
%      MagickBooleanType PosterizeImageChannel(Image *image,
%        const ChannelType channel,const size_t levels,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  /* convenience wrapper: posterize all default channels */
  status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
  return(status);
}

MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
/* snap a quantum to the nearest of `levels` evenly spaced values */
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,image,1,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Follow up with a quantize pass to build the colormap (and dither if
    requested); levels^3 bounds the number of distinct posterized colors.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8-way tree, or 16-way when alpha is associated).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(image,cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree
%  merging their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(image,cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(image,cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image
%  while minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
*/

/*
  DirectToColormapImage() converts an image whose pixel count does not
  exceed the colormap capacity to PseudoClass by assigning every pixel
  its own colormap entry; body continues below.
*/
static MagickBooleanType DirectToColormapImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    number_colors;

  ssize_t
    y;

  status=MagickTrue;
  number_colors=(size_t) (image->columns*image->rows);
  if (AcquireImageColormap(image,number_colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->colors != number_colors)
    return(MagickFalse);
  i=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      proceed;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      image->colormap[i].red=GetPixelRed(q);
      image->colormap[i].green=GetPixelGreen(q);
      image->colormap[i].blue=GetPixelBlue(q);
      image->colormap[i].opacity=GetPixelOpacity(q);
      SetPixelIndex(indexes+x,i);
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  if (image->matte == MagickFalse)
    {
      /* fast paths: small images map 1:1; gray images use a gray map */
      if ((image->columns*image->rows) <= maximum_colors)
        (void) DirectToColormapImage(image,&image->exception);
      if (IsGrayImage(image,&image->exception) != MagickFalse)
        (void) SetGrayscaleImage(image);
    }
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    return(MagickTrue);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e s                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
*/ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(image,cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(image,cube_info,node_info); else { /* Find minimum pruning threshold. 
      */
      /*
        Node survives this pass: if it uniquely represents pixels it counts
        as an output color, and the smallest surviving quantization error
        becomes the pruning threshold for the next pass.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2,  Sr, Sg,  and  Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixel's colors.  When n2
%  > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0  for all nodes except at the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2  pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  /*
    Iteratively raise the pruning threshold until the surviving color count
    fits within cube_info->maximum_colors.  Each Reduce() pass recomputes
    cube_info->colors and cube_info->next_threshold; progress is reported
    as the number of colors eliminated so far relative to the target.
  */
  cube_info->next_threshold=0.0;
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(image,cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImage() replaces the colors of an image with the closest color from
%  a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the reference image's colors into the cube, then map each pixel
    of the target image onto the closest classified color.
  */
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Limit the palette to the colors found in the reference image and
        assign each target pixel its closest color.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
*/ status=QuantizeImages(quantize_info,images); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image) % % A description of each parameter follows: % % o image: The image. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; int intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; for (i=0; i <= (ssize_t) MaxMap; i++) colormap_index[i]=(-1); if (AcquireImageColormap(image,MaxMap+1) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; 
intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } 
indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (IsMonochromeImage(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Fourier transformed AO pair * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3 * * eval_gz, b, gxyz, gs: * - when eval_gz is GTO_Gv_uniform_orth * > b (reciprocal vectors) is diagonal 3x3 matrix * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]). * - when eval_gz is GTO_Gv_uniform_nonorth * > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of *positive* G-vectors along each direction. * - when eval_gz is GTO_Gv_general * only Gv is needed * - when eval_gz is GTO_Gv_nonuniform_orth * > b is the basic G value for each cartesian component * Gx = b[:gs[0]] * Gy = b[gs[0]:gs[0]+gs[1]] * Gz = b[gs[0]+gs[1]:] * > gs[3]: Number of basic G values along each direction. 
* > gxyz[3,nGv] are used to index the basic G value * > Gv is not used */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <complex.h> #include "config.h" #include "cint.h" #include "gto/gto.h" #include "gto/ft_ao.h" #include "np_helper/np_helper.h" #define SQRTPI 1.7724538509055160272981674833411451 #define EXP_CUTOFF 100 double CINTsquare_dist(const double *r1, const double *r2); double CINTcommon_fac_sp(int l); /* * Pyscf-1.5 (and older) use libcint function CINTinit_int1e_EnvVars and * CINTg1e_index_xyz. It's unsafe since the CINTEnvVars type was redefined * in ft_ao.h. Copy the contents of CINTinit_int1e_EnvVars and * CINTg1e_index_xyz here. */ #define IINC 0 #define JINC 1 #define GSHIFT 4 #define POS_E1 5 #define RYS_ROOTS 6 #define TENSOR 7 void GTO_ft_init1e_envs(CINTEnvVars *envs, int *ng, int *shls, int *atm, int natm, int *bas, int nbas, double *env) { envs->natm = natm; envs->nbas = nbas; envs->atm = atm; envs->bas = bas; envs->env = env; envs->shls = shls; const int i_sh = shls[0]; const int j_sh = shls[1]; envs->i_l = bas(ANG_OF, i_sh); envs->j_l = bas(ANG_OF, j_sh); envs->x_ctr[0] = bas(NCTR_OF, i_sh); envs->x_ctr[1] = bas(NCTR_OF, j_sh); envs->nfi = (envs->i_l+1)*(envs->i_l+2)/2; envs->nfj = (envs->j_l+1)*(envs->j_l+2)/2; envs->nf = envs->nfi * envs->nfj; envs->common_factor = 1; envs->gbits = ng[GSHIFT]; envs->ncomp_e1 = ng[POS_E1]; envs->ncomp_tensor = ng[TENSOR]; envs->li_ceil = envs->i_l + ng[IINC]; envs->lj_ceil = envs->j_l + ng[JINC]; if (ng[RYS_ROOTS] > 0) { envs->nrys_roots = ng[RYS_ROOTS]; } else { envs->nrys_roots = (envs->li_ceil + envs->lj_ceil)/2 + 1; } envs->ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh)); envs->rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh)); int dli, dlj; if (envs->li_ceil < envs->lj_ceil) { dli = envs->li_ceil + 1; dlj = envs->li_ceil + envs->lj_ceil + 1; } else { dli = envs->li_ceil + envs->lj_ceil + 1; dlj = envs->lj_ceil + 1; } envs->g_stride_i = 1; envs->g_stride_j = dli; 
envs->g_size = dli * dlj; envs->lk_ceil = 1; envs->ll_ceil = 1; envs->g_stride_k = 0; envs->g_stride_l = 0; } void CINTcart_comp(int *nx, int *ny, int *nz, const int lmax); static void _g2c_index_xyz(int *idx, const CINTEnvVars *envs) { int i_l = envs->i_l; int j_l = envs->j_l; int nfi = envs->nfi; int nfj = envs->nfj; int di = envs->g_stride_i; int dj = envs->g_stride_j; int i, j, n; int ofx, ofjx; int ofy, ofjy; int ofz, ofjz; int i_nx[CART_MAX], i_ny[CART_MAX], i_nz[CART_MAX]; int j_nx[CART_MAX], j_ny[CART_MAX], j_nz[CART_MAX]; CINTcart_comp(i_nx, i_ny, i_nz, i_l); CINTcart_comp(j_nx, j_ny, j_nz, j_l); ofx = 0; ofy = envs->g_size; ofz = envs->g_size * 2; n = 0; for (j = 0; j < nfj; j++) { ofjx = ofx + dj * j_nx[j]; ofjy = ofy + dj * j_ny[j]; ofjz = ofz + dj * j_nz[j]; for (i = 0; i < nfi; i++) { idx[n+0] = ofjx + di * i_nx[i]; idx[n+1] = ofjy + di * i_ny[i]; idx[n+2] = ofjz + di * i_nz[i]; n += 3; } } } static const int _LEN_CART[] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136 }; static const int _CUM_LEN_CART[] = { 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816, }; /* * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0] * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 
33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 0, 0, 
0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, 
-1, -1, -1, -1, 6, -1, 8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 
77, -1, 77, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 
0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 
1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 
0, 12, 11, 10,  9,  8,  7,  6,  5,  4,  3,  2,  1,  0, 14,
};

/* Index helpers over the recursion tables above:
 *   WHERE?_IF_L_INC1(i)  -- address of the component obtained by raising the
 *                           x/y/z power of Cartesian component i by one
 *   START?_IF_L_DEC1(i)  -- first component of shell i with nonzero x/y/z power
 *   ADDR_IF_L_DEC1/2     -- addresses of f_{l} and f_{l-1} in the recursion
 *   DEC1_XYZ(_ORDER)     -- which axis is lowered, and the order i in i/2a
 */
#define WHEREX_IF_L_INC1(i)     i
#define WHEREY_IF_L_INC1(i)     _UPIDY[i]
#define WHEREZ_IF_L_INC1(i)     _UPIDZ[i]
#define STARTX_IF_L_DEC1(i)     0
#define STARTY_IF_L_DEC1(i)     ((i<2)?0:_LEN_CART[i-2])
#define STARTZ_IF_L_DEC1(i)     (_LEN_CART[i-1]-1)
#define ADDR_IF_L_DEC1(l,m)     _DOWN1[_CUM_LEN_CART[l-1]+m]
#define ADDR_IF_L_DEC2(l,m)     _DOWN2[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ(l,m)           _DOWN_XYZ[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ_ORDER(l,m)     _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m]

/*
 * Vertical recursion in 1D over the Fourier-transformed Gaussian auxiliary
 * functions, evaluated on NGv G-vectors at once.  Implements (per the table
 * comment above)
 *     f_{i+1} = i/(2*aij) * f_{i-1} + (rijri - i*G/(2*aij)) * f_i
 * for every Cartesian component up to angular momentum topl.  On entry
 * g[0..NGv) holds f_0; on exit g holds all components, and the return value
 * is the number of Cartesian components (cumxyz) written.
 * NOTE(review): malloc() result is not checked before use -- presumably the
 * callers guarantee a sane NGv; confirm allocation-failure policy upstream.
 */
static int vrr1d_withGv(double complex *g, double *rijri, double aij,
                        double *Gv, int topl, size_t NGv)
{
        int cumxyz = 1;
        if (topl == 0) {
                return cumxyz;
        }
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        int i, n, m, l;
        double a2;
        double complex *p0, *p1, *p2, *dec1, *dec2;
        double *ka2 = malloc(sizeof(double) * NGv*3);
        double *kxa2 = ka2;
        double *kya2 = kxa2 + NGv;
        double *kza2 = kya2 + NGv;
        /* Precompute G/(2*aij) for every grid point and axis. */
        a2 = .5 / aij;
        for (n = 0; n < NGv; n++) {
                kxa2[n] = kx[n] * a2;
                kya2[n] = ky[n] * a2;
                kza2[n] = kz[n] * a2;
        }
        /* l = 1 seed: f_1 = (rijri - i*G/(2*aij)) * f_0 for x, y, z. */
        p0 = g + NGv;
        for (n = 0; n < NGv; n++) {
                p0[      n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n];
                p0[NGv  +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n];
                p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n];
        }
        cumxyz += 3;
        /* Raise l one step at a time via the table-driven recursion. */
        for (l = 1; l < topl; l++) {
                p0 = g + cumxyz * NGv;
                dec1 = p0   - _LEN_CART[l  ] * NGv;
                dec2 = dec1 - _LEN_CART[l-1] * NGv;
                for (i = 0; i < _LEN_CART[l+1]; i++) {
                        m = DEC1_XYZ(l+1,i);
                        kxa2 = ka2 + m * NGv;
                        p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv;
                        p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv;
                        if (ADDR_IF_L_DEC2(l+1,i) < 0) {
                                /* No f_{l-1} term for this component. */
                                for (n = 0; n < NGv; n++) {
                                        p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                                }
                        } else {
                                a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i);
                                for (n = 0; n < NGv; n++) {
                                        p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                                }
                        }
                        p0 += NGv;
                }
                cumxyz += _LEN_CART[l+1];
        }
        free(ka2);
        return cumxyz;
}

/*
 * if li = 3, lj = 1
 * (10 + X*00 -> 01):
 *  gs + X*fs -> fp
 */
static void vrr2d_ket_inc1_withGv(double complex *out, const double complex *g,
double *rirj, int li, int lj, size_t NGv)
{
        /* lj == 0: nothing to raise; the (li|0) block is already final. */
        if (lj == 0) {
                NPzcopy(out, g, _LEN_CART[li]*NGv);
                return;
        }
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        const double complex *g00 = g;
        const double complex *g10 = g + row_00*col_00*NGv;
        int i, j, n;
        const double complex *p00, *p10;
        double complex *p01 = out;
        /* x-step: out(i,j) = g10(i+1x, j-1x) + (ri-rj)_x * g00(i, j-1x) */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[0] * p00[n];
                        }
                        p01 += NGv;
                }
        }
        /* y-step */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[1] * p00[n];
                        }
                        p01 += NGv;
                }
        }
        /* z-step: only the single all-z component of lj-1 remains */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[2] * p00[n];
                        }
                        p01 += NGv;
                }
        }
}

/*
 * Same recurrence as vrr2d_ket_inc1_withGv, but the i and j indices are
 * transposed when storing into out (out is advanced per-j instead of per-i,
 * and p01 strides by row_01 per i).
 */
static void vrr2d_inc1_swapij(double complex *out, const double complex *g,
                              double *rirj, int li, int lj, size_t NGv)
{
        if (lj == 0) {
                NPzcopy(out, g, _LEN_CART[li]*NGv);
                return;
        }
        const int row_01 = _LEN_CART[lj];
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        const double complex *g00 = g;
        const double complex *g10 = g + row_00*col_00*NGv;
        int i, j, n;
        const double complex *p00, *p10;
        double complex *p01 = out;
        /* x-step, transposed store */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i) * NGv;
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
                        p01 = out + i*row_01 * NGv;
                        for (n = 0; n < NGv; n++) {
                                p01[n] = p10[n] + rirj[0] * p00[n];
                        }
                }
                out += NGv;
        }
        /* y-step, transposed store */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0;
i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += NGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } static void vrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; col_00 = _LEN_CART[i-1]; pg00 += row_00*col_00 * NGv; pg01 += 
row_01*col_01 * NGv; } } vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); vrr1d_withGv(g, rijri, aij, Gv, topl, NGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijrj[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); vrr1d_withGv(g, rijrj, aij, Gv, topl, NGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double 
complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); if (nmax > 0) { for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * NGv; off1 = i * NGv; off2 = (i+1) * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; 
double rij[3], rirj[3], rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); if (nmax > 0) { off0 = dj * NGv; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, int *idx, const CINTEnvVars *envs, double *Gv, size_t NGv, int empty) { int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; 
iz = idx[n*3+2];
                        for (k = 0; k < NGv; k++) {
                                if (gz[k] != 0) {
                                        gout[n*NGv+k] = g[ix*NGv+k]
                                                      * g[iy*NGv+k]
                                                      * g[iz*NGv+k];
                                } else {
                                        /* gz==0 marks a cut-off G point */
                                        gout[n*NGv+k] = 0;
                                }
                        }
                }
        } else {
                /* accumulate into an already-initialized gout */
                for (n = 0; n < envs->nf; n++) {
                        ix = idx[n*3+0];
                        iy = idx[n*3+1];
                        iz = idx[n*3+2];
                        for (k = 0; k < NGv; k++) {
                                if (gz[k] != 0) {
                                        gout[n*NGv+k] += g[ix*NGv+k]
                                                       * g[iy*NGv+k]
                                                       * g[iz*NGv+k];
                                }
                        }
                }
        }
}

/* Contract one primitive block gp (nf values) into nctr contracted blocks gc,
 * scaling by the contraction coefficients coeff[nprim*n] (stride nprim per
 * contraction).  empty != 0 overwrites gc; otherwise it accumulates, skipping
 * zero coefficients. */
static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp,
                        const int nprim, const int nctr, const double *coeff,
                        int empty)
{
        size_t n, i;
        double c;
        if (empty) {
                for (n = 0; n < nctr; n++) {
                        c = coeff[nprim*n];
                        for (i = 0; i < nf; i++) {
                                gc[i] = gp[i] * c;
                        }
                        gc += nf;
                }
        } else {
                for (n = 0; n < nctr; n++) {
                        c = coeff[nprim*n];
                        if (c != 0) {
                                for (i = 0; i < nf; i++) {
                                        gc[i] += gp[i] * c;
                                }
                        }
                        gc += nf;
                }
        }
}

/* Reorder in[(n*comp+ic)*NGv + k] into out[ic][n*NGv + k]: move the tensor
 * component index ic from the innermost position to the outermost. */
static void transpose(double complex *out, double complex *in,
                      int nf, int comp, size_t NGv)
{
        size_t n, k, ic;
        double complex *pin;
        for (ic = 0; ic < comp; ic++) {
                for (n = 0; n < nf; n++) {
                        pin = in + (n*comp+ic) * NGv;
                        for (k = 0; k < NGv; k++) {
                                out[n*NGv+k] = pin[k];
                        }
                }
                out += nf * NGv;
        }
}

/* Workspace sizes for the early-contraction path, indexed by the angular
 * momentum pair (i,j) through the bufsize macro below (triangular layout,
 * larger l first). */
static const int _GBUFSIZE[] = {
        1, 4, 10, 10, 20, 48, 20, 35, 75, 150,
        35, 56, 108, 216, 384, 56, 84, 147, 294, 510,
        850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165,
        243, 486, 816, 1360, 2040, 3030
};
/* NOTE(review): continuation backslash restored; the macro's second half
 * begins the next physical line. */
#define bufsize(i,j) _GBUFSIZE[((i>=j) ? \
(i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); if (gctrj == NULL) { fprintf(stderr, "gctrj = malloc(%zu) falied in GTO_aopair_early_contractv\n", sizeof(double complex) * (lenj + leni + len1)); } double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip 
< i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXP_CUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); fac1i = fac1j * dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv, cache); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } else { hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int *gempty = empty + 2; const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv; const size_t leng = nf * n_comp * NGv; const size_t leni = nf * i_ctr * n_comp * NGv; size_t lenj = 0; if (n_comp 
> 1) { lenj = nf * i_ctr * j_ctr * n_comp * NGv; } double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj)); if (g == NULL) { fprintf(stderr, "g = malloc(%zu) falied in GTO_aopair_lazy_contract\n", sizeof(double complex) * (len1 + leng + leni + lenj)); } double complex *g1 = g + len1; double complex *gout, *gctri, *gctrj; if (n_comp == 1) { gctrj = gctr; } else { gctrj = g1; g1 += lenj; } if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); _g2c_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { envs->aj[0] = aj[jp]; if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { envs->ai[0] = ai[ip]; aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXP_CUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv, cache); (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*n_comp*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (n_comp > 1 && !*jempty) { transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv); } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { double *kx = Gv; double *ky = kx + NGv; double *kz 
= ky + NGv; const double cutoff = EXP_CUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < NGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I); } else { out[n] = 0; } } } /* * Gv = dot(b.T,gxyz) + kpt * kk = dot(Gv, Gv) * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt) * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I); * * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij}, * followed by 3*NGv floats for Gbase */ void GTO_Gv_orth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[1] = rij[1] * b[4]; br[2] = rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double *kkpool = cache; double *kkx = kkpool; double *kky = kkx + nx; double *kkz = kky + ny; double complex *zbuf = (double complex *)(kkz + nz); double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXP_CUTOFF * aij * 4; int n, ix, iy, iz; double Gr; for (n = 0; n < nx+ny+nz; n++) { kkpool[n] = -1; } for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (kkx[ix] < 0) { Gr = Gxbase[ix] * br[0] + kr[0]; kkx[ix] = .25 * kx[n]*kx[n] / aij; csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kky[iy] < 0) { Gr = Gybase[iy] * br[1] + kr[1]; kky[iy] = .25 * ky[n]*ky[n] / aij; csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I); } 
if (kkz[iz] < 0) { Gr = Gzbase[iz] * br[2] + kr[2]; kkz[iz] = .25 * kz[n]*kz[n] / aij; csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) { out[n] = csx[ix] * csy[iy] * csz[iz]; } else { out[n] = 0; } } } void GTO_Gv_nonorth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[0] += rij[1] * b[1]; br[0] += rij[2] * b[2]; br[1] = rij[0] * b[3]; br[1] += rij[1] * b[4]; br[1] += rij[2] * b[5]; br[2] = rij[0] * b[6]; br[2] += rij[1] * b[7]; br[2] += rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex *zbuf = (double complex *)cache; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; int n; int8_t *empty = (int8_t *)(csz + nz); int8_t *xempty = empty; int8_t *yempty = xempty + nx; int8_t *zempty = yempty + ny; for (n = 0; n < nx+ny+nz; n++) { empty[n] = 1; } int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXP_CUTOFF * aij * 4; int ix, iy, iz; double Gr, kk; for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (xempty[ix]) { Gr = Gxbase[ix] * br[0] + kr[0]; csx[ix] = cos(Gr)-sin(Gr)*_Complex_I; xempty[ix] = 0; } if (yempty[iy]) { Gr = Gybase[iy] * br[1] + kr[1]; csy[iy] = cos(Gr)-sin(Gr)*_Complex_I; yempty[iy] = 0; } if (zempty[iz]) { Gr = Gzbase[iz] * br[2] + kr[2]; csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I); zempty[iz] = 0; } out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz]; } else { 
out[n] = 0; } } } static void zcopy_ij(double complex *out, const double complex *gctr, const int mi, const int mj, const int ni, const size_t NGv) { int i, j, k; for (j = 0; j < mj; j++) { for (i = 0; i < mi; i++) { for (k = 0; k < NGv; k++) { out[i*NGv+k] = gctr[i*NGv+k]; } } out += ni * NGv; gctr += mi * NGv; } } void GTO_ft_c2s_cart(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int nfi = envs->nfi; const int nfj = envs->nfj; const int ni = nfi*i_ctr; const int nj = nfj*j_ctr; const int nf = envs->nf; int ic, jc; double complex *pout; for (jc = 0; jc < nj; jc += nfj) { for (ic = 0; ic < ni; ic += nfi) { pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv); gctr += nf * NGv; } } } #define C2S(sph, nket, cart, l) \ (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l) #define OF_CMPLX 2 void GTO_ft_c2s_sph(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int di = i_l * 2 + 1; const int dj = j_l * 2 + 1; const int ni = di*i_ctr; const int nj = dj*j_ctr; const int nfi = envs->nfi; const int nf = envs->nf; int ic, jc, k; const int buflen = nfi*dj; double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv); if (buf1 == NULL) { fprintf(stderr, "buf1 = malloc(%zu) falied in GTO_ft_c2s_sph\n", sizeof(double complex) * buflen*2 * NGv); } double complex *buf2 = buf1 + buflen * NGv; double complex *pout, *pij, *buf; for (jc = 0; jc < nj; jc += dj) { for (ic = 0; ic < ni; ic += di) { buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l); pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l); for (k = NGv; k < dj*NGv; k+=NGv) { pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l); } pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, pij, di, dj, 
dims[0], NGv); gctr += nf * NGv; } } free(buf1); } static void _ft_zset0(double complex *out, int *dims, int *counts, int comp, size_t NGv) { double complex *pout; int i, j, k, ic; for (ic = 0; ic < comp; ic++) { for (j = 0; j < counts[1]; j++) { pout = out + j * dims[0] * NGv; for (i = 0; i < counts[0]; i++) { for (k = 0; k < NGv; k++) { pout[i*NGv+k] = 0; } } } out += dims[0] * dims[1] * NGv; } } /************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_aopair_drv(double complex *out, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, CINTEnvVars *envs) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const size_t nc = envs->nf * i_ctr * j_ctr * NGv; double complex *gctr = malloc(sizeof(double complex) * nc * n_comp); if (gctr == NULL) { fprintf(stderr, "gctr = malloc(%zu) falied in GTO_ft_aopair_drv\n", sizeof(double complex) * nc * n_comp); } double *cache = malloc(sizeof(double) * (gs[0] + gs[1] + gs[2]) * 3); if (cache == NULL) { fprintf(stderr, "cache = malloc(%zu) falied in GTO_ft_aopair_drv\n", sizeof(double complex) * (gs[0] + gs[1] + gs[2]) * 3); } if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { const int *shls = envs->shls; const int *bas = envs->bas; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, envs, 
eval_gz, fac, Gv, b, gxyz, gs, NGv, cache);
        /* counts[0/1]: number of AO rows/cols produced by the chosen c2s */
        int counts[4];
        if (f_c2s == &GTO_ft_c2s_sph) {
                counts[0] = (envs->i_l*2+1) * i_ctr;
                counts[1] = (envs->j_l*2+1) * j_ctr;
        } else { // f_c2s == &GTO_ft_c2s_cart
                counts[0] = envs->nfi * i_ctr;
                counts[1] = envs->nfj * j_ctr;
        }
        if (dims == NULL) {
                dims = counts;
        }
        size_t nout = dims[0] * dims[1] * NGv;
        int n;
        if (has_value) {
                /* transform each tensor component to the output AO layout */
                for (n = 0; n < n_comp; n++) {
                        (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv);
                }
        } else {
                /* integral screened out entirely: zero the output block */
                _ft_zset0(out, dims, counts, n_comp, NGv);
        }
        free(gctr);
        free(cache);
        return has_value;
}

/* FT overlap of one shell pair, Cartesian GTO output.
 * Returns nonzero if any integral survived screening. */
int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims,
                     int (*eval_aopair)(), FPtr_eval_gz eval_gz,
                     double complex fac, double *Gv, double *b, int *gxyz,
                     int *gs, int nGv, int *atm, int natm, int *bas, int nbas,
                     double *env)
{
        CINTEnvVars envs;
        int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
        GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
        envs.f_gout = &inner_prod;
        return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz,
                                 &GTO_ft_c2s_cart, fac, Gv, b, gxyz, gs, nGv,
                                 &envs);
}

/* FT overlap of one shell pair, real-spherical GTO output. */
int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims,
                    int (*eval_aopair)(), FPtr_eval_gz eval_gz,
                    double complex fac, double *Gv, double *b, int *gxyz,
                    int *gs, int nGv, int *atm, int natm, int *bas, int nbas,
                    double *env)
{
        CINTEnvVars envs;
        int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
        GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
        envs.f_gout = &inner_prod;
        return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz,
                                 &GTO_ft_c2s_sph, fac, Gv, b, gxyz, gs, nGv,
                                 &envs);
}

/*************************************************
 *
 *************************************************/

/* Copy an (di x dj) shell-pair block into the s2 (lower-triangular packed)
 * matrix for ip > jp; the row stride grows with the absolute row index. */
static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv,
                          int comp, int nij, int ip, int di, int dj)
{
        const size_t ip1 = ip + 1;
        int i, j, n, ic;
        double complex *pin, *pout;
        for (ic = 0; ic < comp; ic++) {
                pout = out + ic * nij * NGv;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                pin = in + NGv * (j*di+i);
                                for (n = 0; n < NGv;
n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - 
ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out + NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } out += nrow * ncol * NGv; } } } void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off0 = i0 * (i0 + 1) / 2; const size_t off = ip * (ip + 1) / 2 - off0 + jp; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void 
GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(), double complex *mat, int comp, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } size_t di = GTOmax_shell_dim(ao_loc, shls_slice , 1); size_t dj = GTOmax_shell_dim(ao_loc, shls_slice+2, 1); #pragma omp parallel { int i, j, ij; double complex *buf = malloc(sizeof(double complex) * di*dj*comp*(size_t)nGv); if (buf == NULL) { fprintf(stderr, "buf = malloc(%zu) falied in GTO_ft_fill_drv\n", di*dj*comp*(size_t)nGv); } #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, eval_aopair, eval_gz, mat, comp, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz, double complex *out, int comp, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != 
&GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); } /* * Reversed vrr2d. They are used by numint_uniform_grid.c */ void GTOplain_vrr2d_ket_inc1(double *out, const double *g, double *rirj, int li, int lj) { if (lj == 0) { NPdcopy(out, g, _LEN_CART[li]); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double *g00 = g; const double *g10 = g + row_00*col_00; int i, j; const double *p00, *p10; double *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)); p01[i] = p10[0] + rirj[0] * p00[0]; } p01 += row_00; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)); p01[i] = p10[0] + rirj[1] * p00[0]; } p01 += row_00; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)); p01[i] = p10[0] + rirj[2] * p00[0]; } } } void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00, double *rirj, int li, int lj) { const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; double *g10 = g00 + row_00*col_00; double *p00, *p10; int i, j; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)); p10[0] += g01[i]; p00[0] += g01[i] * rirj[0]; } g01 += 
row_00;
        }
        /* y direction: scatter g01 back into the (l+1,y) parent and the
         * rirj[1]-weighted (l) parent */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[1];
                }
                g01 += row_00;
        }
        /* z direction: only the single all-z component of lj-1 remains */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[2];
                }
        }
}
/* ===== appended file: GB_binop__lt_int64.c (SuiteSparse:GraphBLAS, auto-generated) ===== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lt_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__lt_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lt_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__lt_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lt_int64)
// A*D function (colscale):         GB (_AxD__lt_int64)
// D*A function (rowscale):         GB (_DxB__lt_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lt_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lt_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lt_int64)
// C=scalar+B                       GB (_bind1st__lt_int64)
// C=scalar+B'                      GB (_bind1st_tran__lt_int64)
// C=A+scalar                       GB (_bind2nd__lt_int64)
// C=A'+scalar                      GB (_bind2nd_tran__lt_int64)

// C type:   bool
// A type:   int64_t
// A pattern? 0
// B type:   int64_t
// B pattern? 0

// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT64 || GxB_NO_LT_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // this kernel is not used for the LT operator (template disabled)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // this kernel is not used for the LT operator (template disabled)
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of missing entries of A and B
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__lt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Clustering.h
// // Copyright (C) 2015-2020 Yahoo Japan Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #pragma once #include "NGT/Index.h" using namespace std; #if defined(NGT_AVX_DISABLED) #define NGT_CLUSTER_NO_AVX #else #if defined(__AVX2__) #define NGT_CLUSTER_AVX2 #else #define NGT_CLUSTER_NO_AVX #endif #endif #if defined(NGT_CLUSTER_NO_AVX) #warning "*** SIMD is *NOT* available! ***" #else #include <immintrin.h> #endif #include <omp.h> #include <random> namespace NGT { class Clustering { public: enum InitializationMode { InitializationModeHead = 0, InitializationModeRandom = 1, InitializationModeKmeansPlusPlus = 2 }; enum ClusteringType { ClusteringTypeKmeansWithNGT = 0, ClusteringTypeKmeansWithoutNGT = 1, ClusteringTypeKmeansWithIteration = 2, ClusteringTypeKmeansWithNGTForCentroids = 3 }; class Entry { public: Entry():vectorID(0), centroidID(0), distance(0.0) {} Entry(size_t vid, size_t cid, double d):vectorID(vid), centroidID(cid), distance(d) {} bool operator<(const Entry &e) const {return distance > e.distance;} uint32_t vectorID; uint32_t centroidID; double distance; }; class DescendingEntry { public: DescendingEntry(size_t vid, double d):vectorID(vid), distance(d) {} bool operator<(const DescendingEntry &e) const {return distance < e.distance;} size_t vectorID; double distance; }; class Cluster { public: Cluster(std::vector<float> &c):centroid(c), radius(0.0) {} Cluster(const Cluster &c) { *this = c; } Cluster &operator=(const Cluster &c) { members = 
c.members; centroid = c.centroid; radius = c.radius; return *this; } std::vector<Entry> members; std::vector<float> centroid; double radius; }; Clustering(InitializationMode im = InitializationModeHead, ClusteringType ct = ClusteringTypeKmeansWithNGT, size_t mi = 100): clusteringType(ct), initializationMode(im), maximumIteration(mi) { initialize(); } void initialize() { epsilonFrom = 0.12; epsilonTo = epsilonFrom; epsilonStep = 0.04; resultSizeCoefficient = 5; } static void convert(std::vector<std::string> &strings, std::vector<float> &vector) { vector.clear(); for (auto it = strings.begin(); it != strings.end(); ++it) { vector.push_back(stod(*it)); } } static void extractVector(const std::string &str, std::vector<float> &vec) { std::vector<std::string> tokens; NGT::Common::tokenize(str, tokens, " \t"); convert(tokens, vec); } static void loadVectors(const std::string &file, std::vector<std::vector<float> > &vectors) { std::ifstream is(file); if (!is) { throw std::runtime_error("loadVectors::Cannot open " + file ); } std::string line; while (getline(is, line)) { std::vector<float> v; extractVector(line, v); vectors.push_back(v); } } static void saveVectors(const std::string &file, std::vector<std::vector<float> > &vectors) { std::ofstream os(file); for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) { std::vector<float> &v = *vit; for (auto it = v.begin(); it != v.end(); ++it) { os << std::setprecision(9) << (*it); if (it + 1 != v.end()) { os << "\t"; } } os << std::endl; } } static void saveVector(const std::string &file, std::vector<size_t> &vectors) { std::ofstream os(file); for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) { os << *vit << std::endl; } } static void loadClusters(const std::string &file, std::vector<Cluster> &clusters, size_t numberOfClusters = 0) { std::ifstream is(file); if (!is) { throw std::runtime_error("loadClusters::Cannot open " + file); } std::string line; while (getline(is, line)) { std::vector<float> v; 
extractVector(line, v); clusters.push_back(v); if ((numberOfClusters != 0) && (clusters.size() >= numberOfClusters)) { break; } } if ((numberOfClusters != 0) && (clusters.size() < numberOfClusters)) { std::cerr << "initial cluster data are not enough. " << clusters.size() << ":" << numberOfClusters << std::endl; exit(1); } } #if !defined(NGT_CLUSTER_NO_AVX) static double sumOfSquares(float *a, float *b, size_t size) { __m256 sum = _mm256_setzero_ps(); float *last = a + size; float *lastgroup = last - 7; while (a < lastgroup) { __m256 v = _mm256_sub_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b)); sum = _mm256_add_ps(sum, _mm256_mul_ps(v, v)); a += 8; b += 8; } __attribute__((aligned(32))) float f[8]; _mm256_store_ps(f, sum); double s = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7]; while (a < last) { double d = *a++ - *b++; s += d * d; } return s; } #else // !defined(NGT_AVX_DISABLED) && defined(__AVX__) static double sumOfSquares(float *a, float *b, size_t size) { double csum = 0.0; float *x = a; float *y = b; for (size_t i = 0; i < size; i++) { double d = (double)*x++ - (double)*y++; csum += d * d; } return csum; } #endif // !defined(NGT_AVX_DISABLED) && defined(__AVX__) static double distanceL2(std::vector<float> &vector1, std::vector<float> &vector2) { return sqrt(sumOfSquares(&vector1[0], &vector2[0], vector1.size())); } static double distanceL2(std::vector<std::vector<float> > &vector1, std::vector<std::vector<float> > &vector2) { assert(vector1.size() == vector2.size()); double distance = 0.0; for (size_t i = 0; i < vector1.size(); i++) { distance += distanceL2(vector1[i], vector2[i]); } distance /= (double)vector1.size(); return distance; } static double meanSumOfSquares(std::vector<float> &vector1, std::vector<float> &vector2) { return sumOfSquares(&vector1[0], &vector2[0], vector1.size()) / (double)vector1.size(); } static void subtract(std::vector<float> &a, std::vector<float> &b) { assert(a.size() == b.size()); auto bit = b.begin(); for (auto ait = 
a.begin(); ait != a.end(); ++ait, ++bit) { *ait = *ait - *bit; } } static void getInitialCentroidsFromHead(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size) { size = size > vectors.size() ? vectors.size() : size; clusters.clear(); for (size_t i = 0; i < size; i++) { clusters.push_back(Cluster(vectors[i])); } } static void getInitialCentroidsRandomly(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size, size_t seed) { clusters.clear(); std::random_device rnd; if (seed == 0) { seed = rnd(); } std::mt19937 mt(seed); for (size_t i = 0; i < size; i++) { size_t idx = mt() * vectors.size() / mt.max(); if (idx >= size) { i--; continue; } clusters.push_back(Cluster(vectors[idx])); } assert(clusters.size() == size); } static void getInitialCentroidsKmeansPlusPlus(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size) { size = size > vectors.size() ? vectors.size() : size; clusters.clear(); std::random_device rnd; std::mt19937 mt(rnd()); size_t idx = (long long)mt() * (long long)vectors.size() / (long long)mt.max(); clusters.push_back(Cluster(vectors[idx])); NGT::Timer timer; for (size_t k = 1; k < size; k++) { double sum = 0; std::priority_queue<DescendingEntry> sortedObjects; // get d^2 and sort #pragma omp parallel for for (size_t vi = 0; vi < vectors.size(); vi++) { auto vit = vectors.begin() + vi; double mind = DBL_MAX; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(*vit, (*cit).centroid); d *= d; if (d < mind) { mind = d; } } #pragma omp critical { sortedObjects.push(DescendingEntry(distance(vectors.begin(), vit), mind)); sum += mind; } } double l = (double)mt() / (double)mt.max() * sum; while (!sortedObjects.empty()) { sum -= sortedObjects.top().distance; if (l >= sum) { clusters.push_back(Cluster(vectors[sortedObjects.top().vectorID])); break; } sortedObjects.pop(); } } } static void assign(std::vector<std::vector<float> > 
&vectors, std::vector<Cluster> &clusters, size_t clusterSize = std::numeric_limits<size_t>::max()) { // compute distances to the nearest clusters, and construct heap by the distances. NGT::Timer timer; timer.start(); std::vector<Entry> sortedObjects(vectors.size()); #pragma omp parallel for for (size_t vi = 0; vi < vectors.size(); vi++) { auto vit = vectors.begin() + vi; { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(*vit, (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } sortedObjects[vi] = Entry(vi, mincidx, mind); } } std::sort(sortedObjects.begin(), sortedObjects.end()); // clear for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { (*cit).members.clear(); } // distribute objects to the nearest clusters in the same size constraint. for (auto soi = sortedObjects.rbegin(); soi != sortedObjects.rend();) { Entry &entry = *soi; if (entry.centroidID >= clusters.size()) { std::cerr << "Something wrong. 
" << entry.centroidID << ":" << clusters.size() << std::endl; soi++; continue; } if (clusters[entry.centroidID].members.size() < clusterSize) { clusters[entry.centroidID].members.push_back(entry); soi++; } else { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() >= clusterSize) { continue; } double d = distanceL2(vectors[entry.vectorID], (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } entry = Entry(entry.vectorID, mincidx, mind); int pt = distance(sortedObjects.rbegin(), soi); std::sort(sortedObjects.begin(), soi.base()); soi = sortedObjects.rbegin() + pt; assert(pt == distance(sortedObjects.rbegin(), soi)); } } moveFartherObjectsToEmptyClusters(clusters); } static void moveFartherObjectsToEmptyClusters(std::vector<Cluster> &clusters) { size_t emptyClusterCount = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() == 0) { emptyClusterCount++; double max = 0.0; auto maxit = clusters.begin(); for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) { if ((*scit).members.size() >= 2 && (*scit).members.back().distance > max) { maxit = scit; max = (*scit).members.back().distance; } } (*cit).members.push_back((*maxit).members.back()); (*cit).members.back().centroidID = distance(clusters.begin(), cit); (*maxit).members.pop_back(); } } emptyClusterCount = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() == 0) { emptyClusterCount++; } } } static void assignWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, float &radius, size_t &resultSize, float epsilon = 0.12, size_t notRetrievedObjectCount = 0) { size_t dataSize = vectors.size(); assert(index.getObjectRepositorySize() - 1 == vectors.size()); vector<vector<Entry> > results(clusters.size()); #pragma omp parallel for for (size_t ci = 0; ci < 
clusters.size(); ci++) { auto cit = clusters.begin() + ci; NGT::ObjectDistances objects; // result set NGT::Object *query = 0; query = index.allocateObject((*cit).centroid); // set search prameters. NGT::SearchContainer sc(*query); // search parametera container. sc.setResults(&objects); // set the result set. sc.setEpsilon(epsilon); // set exploration coefficient. if (radius > 0.0) { sc.setRadius(radius); sc.setSize(dataSize / 2); } else { sc.setSize(resultSize); // the number of resultant objects. } index.search(sc); results[ci].reserve(objects.size()); for (size_t idx = 0; idx < objects.size(); idx++) { size_t oidx = objects[idx].id - 1; results[ci].push_back(Entry(oidx, ci, objects[idx].distance)); } index.deleteObject(query); } size_t resultCount = 0; for (auto ri = results.begin(); ri != results.end(); ++ri) { resultCount += (*ri).size(); } vector<Entry> sortedResults; sortedResults.reserve(resultCount); for (auto ri = results.begin(); ri != results.end(); ++ri) { auto end = (*ri).begin(); for (; end != (*ri).end(); ++end) { } std::copy((*ri).begin(), end, std::back_inserter(sortedResults)); } vector<bool> processedObjects(dataSize, false); for (auto i = sortedResults.begin(); i != sortedResults.end(); ++i) { processedObjects[(*i).vectorID] = true; } notRetrievedObjectCount = 0; vector<uint32_t> notRetrievedObjectIDs; for (size_t idx = 0; idx < dataSize; idx++) { if (!processedObjects[idx]) { notRetrievedObjectCount++; notRetrievedObjectIDs.push_back(idx); } } sort(sortedResults.begin(), sortedResults.end()); for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { (*cit).members.clear(); } for (auto i = sortedResults.rbegin(); i != sortedResults.rend(); ++i) { size_t objectID = (*i).vectorID; size_t clusterID = (*i).centroidID; if (processedObjects[objectID]) { processedObjects[objectID] = false; clusters[clusterID].members.push_back(*i); clusters[clusterID].members.back().centroidID = clusterID; radius = (*i).distance; } } vector<Entry> 
notRetrievedObjects(notRetrievedObjectIDs.size()); #pragma omp parallel for for (size_t vi = 0; vi < notRetrievedObjectIDs.size(); vi++) { auto vit = notRetrievedObjectIDs.begin() + vi; { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(vectors[*vit], (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } notRetrievedObjects[vi] = Entry(*vit, mincidx, mind); // Entry(vectorID, centroidID, distance) } } sort(notRetrievedObjects.begin(), notRetrievedObjects.end()); for (auto nroit = notRetrievedObjects.begin(); nroit != notRetrievedObjects.end(); ++nroit) { clusters[(*nroit).centroidID].members.push_back(*nroit); } moveFartherObjectsToEmptyClusters(clusters); } static double calculateCentroid(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) { double distance = 0; size_t memberCount = 0; for (auto it = clusters.begin(); it != clusters.end(); ++it) { memberCount += (*it).members.size(); if ((*it).members.size() != 0) { std::vector<float> mean(vectors[0].size(), 0.0); for (auto memit = (*it).members.begin(); memit != (*it).members.end(); ++memit) { auto mit = mean.begin(); auto &v = vectors[(*memit).vectorID]; for (auto vit = v.begin(); vit != v.end(); ++vit, ++mit) { *mit += *vit; } } for (auto mit = mean.begin(); mit != mean.end(); ++mit) { *mit /= (*it).members.size(); } distance += distanceL2((*it).centroid, mean); (*it).centroid = mean; } else { cerr << "Clustering: Fatal Error. No member!" 
<< endl; abort(); } } return distance; } static void saveClusters(const std::string &file, std::vector<Cluster> &clusters) { std::ofstream os(file); for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { std::vector<float> &v = (*cit).centroid; for (auto it = v.begin(); it != v.end(); ++it) { os << std::setprecision(9) << (*it); if (it + 1 != v.end()) { os << "\t"; } } os << std::endl; } } double kmeansWithoutNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) { size_t clusterSize = std::numeric_limits<size_t>::max(); if (clusterSizeConstraint) { clusterSize = ceil((double)vectors.size() / (double)numberOfClusters); } double diff = 0; for (size_t i = 0; i < maximumIteration; i++) { std::cerr << "iteration=" << i << std::endl; assign(vectors, clusters, clusterSize); // centroid is recomputed. // diff is distance between the current centroids and the previous centroids. diff = calculateCentroid(vectors, clusters); if (diff == 0) { break; } } return diff == 0; } double kmeansWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters, float epsilon) { diffHistory.clear(); NGT::Timer timer; timer.start(); float radius; double diff = 0.0; size_t resultSize; resultSize = resultSizeCoefficient * vectors.size() / clusters.size(); for (size_t i = 0; i < maximumIteration; i++) { size_t notRetrievedObjectCount = 0; radius = -1.0; assignWithNGT(index, vectors, clusters, radius, resultSize, epsilon, notRetrievedObjectCount); // centroid is recomputed. // diff is distance between the current centroids and the previous centroids. 
std::vector<Cluster> prevClusters = clusters; diff = calculateCentroid(vectors, clusters); timer.stop(); std::cerr << "iteration=" << i << " time=" << timer << " diff=" << diff << std::endl; timer.start(); diffHistory.push_back(diff); if (diff == 0) { break; } } return diff; } double kmeansWithNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) { pid_t pid = getpid(); std::stringstream str; str << "cluster-ngt." << pid; string database = str.str(); string dataFile; size_t dataSize = 0; size_t dim = clusters.front().centroid.size(); NGT::Property property; property.dimension = dim; property.graphType = NGT::Property::GraphType::GraphTypeANNG; property.objectType = NGT::Index::Property::ObjectType::Float; property.distanceType = NGT::Index::Property::DistanceType::DistanceTypeL2; NGT::Index::createGraphAndTree(database, property, dataFile, dataSize); float *data = new float[vectors.size() * dim]; float *ptr = data; dataSize = vectors.size(); for (auto vi = vectors.begin(); vi != vectors.end(); ++vi) { memcpy(ptr, &((*vi)[0]), dim * sizeof(float)); ptr += dim; } size_t threadSize = 20; NGT::Index::append(database, data, dataSize, threadSize); delete[] data; NGT::Index index(database); return kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilonFrom); } double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, std::vector<Cluster> &clusters) { NGT::GraphIndex &graph = static_cast<NGT::GraphIndex&>(index.getIndex()); NGT::ObjectSpace &os = graph.getObjectSpace(); size_t size = os.getRepository().size(); std::vector<std::vector<float> > vectors(size - 1); for (size_t idx = 1; idx < size; idx++) { try { os.getObject(idx, vectors[idx - 1]); } catch(...) 
{ cerr << "Cannot get object " << idx << endl; } } cerr << "# of data for clustering=" << vectors.size() << endl; double diff = DBL_MAX; clusters.clear(); setupInitialClusters(vectors, numberOfClusters, clusters); for (float epsilon = epsilonFrom; epsilon <= epsilonTo; epsilon += epsilonStep) { cerr << "epsilon=" << epsilon << endl; diff = kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilon); if (diff == 0.0) { return diff; } } return diff; } double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, NGT::Index &outIndex) { std::vector<Cluster> clusters; double diff = kmeansWithNGT(index, numberOfClusters, clusters); for (auto i = clusters.begin(); i != clusters.end(); ++i) { outIndex.insert((*i).centroid); } outIndex.createIndex(16); return diff; } double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters) { NGT::Property prop; index.getProperty(prop); string path = index.getPath(); index.save(); index.close(); string outIndexName = path; string inIndexName = path + ".tmp"; std::rename(outIndexName.c_str(), inIndexName.c_str()); NGT::Index::createGraphAndTree(outIndexName, prop); index.open(outIndexName); NGT::Index inIndex(inIndexName); double diff = kmeansWithNGT(inIndex, numberOfClusters, index); inIndex.close(); NGT::Index::destroy(inIndexName); return diff; } double kmeansWithNGT(string &indexName, size_t numberOfClusters) { NGT::Index inIndex(indexName); double diff = kmeansWithNGT(inIndex, numberOfClusters); inIndex.save(); inIndex.close(); return diff; } static double calculateMSE(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) { double mse = 0.0; size_t count = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { count += (*cit).members.size(); for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) { mse += meanSumOfSquares((*cit).centroid, vectors[(*mit).vectorID]); } } assert(vectors.size() == count); return mse / (double)vectors.size(); } static double 
calculateML2(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) { double d = 0.0; size_t count = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { count += (*cit).members.size(); double localD= 0.0; for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) { double distance = distanceL2((*cit).centroid, vectors[(*mit).vectorID]); d += distance; localD += distance; } } if (vectors.size() != count) { std::cerr << "Warning! vectors.size() != count" << std::endl; } return d / (double)vectors.size(); } static double calculateML2FromSpecifiedCentroids(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, std::vector<size_t> &centroidIds) { double d = 0.0; size_t count = 0; for (auto it = centroidIds.begin(); it != centroidIds.end(); ++it) { Cluster &cluster = clusters[(*it)]; count += cluster.members.size(); for (auto mit = cluster.members.begin(); mit != cluster.members.end(); ++mit) { d += distanceL2(cluster.centroid, vectors[(*mit).vectorID]); } } return d / (double)vectors.size(); } void setupInitialClusters(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) { if (clusters.empty()) { switch (initializationMode) { case InitializationModeHead: { getInitialCentroidsFromHead(vectors, clusters, numberOfClusters); break; } case InitializationModeRandom: { getInitialCentroidsRandomly(vectors, clusters, numberOfClusters, 0); break; } case InitializationModeKmeansPlusPlus: { getInitialCentroidsKmeansPlusPlus(vectors, clusters, numberOfClusters); break; } default: std::cerr << "proper initMode is not specified." 
<< std::endl; exit(1); } } } bool kmeans(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) { setupInitialClusters(vectors, numberOfClusters, clusters); switch (clusteringType) { case ClusteringTypeKmeansWithoutNGT: return kmeansWithoutNGT(vectors, numberOfClusters, clusters); break; case ClusteringTypeKmeansWithNGT: return kmeansWithNGT(vectors, numberOfClusters, clusters); break; default: cerr << "kmeans::fatal error!. invalid clustering type. " << clusteringType << endl; abort(); break; } } static void evaluate(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, char mode, std::vector<size_t> centroidIds = std::vector<size_t>()) { size_t clusterSize = std::numeric_limits<size_t>::max(); assign(vectors, clusters, clusterSize); std::cout << "The number of vectors=" << vectors.size() << std::endl; std::cout << "The number of centroids=" << clusters.size() << std::endl; if (centroidIds.size() == 0) { switch (mode) { case 'e': std::cout << "MSE=" << calculateMSE(vectors, clusters) << std::endl; break; case '2': default: std::cout << "ML2=" << calculateML2(vectors, clusters) << std::endl; break; } } else { switch (mode) { case 'e': break; case '2': default: std::cout << "ML2=" << calculateML2FromSpecifiedCentroids(vectors, clusters, centroidIds) << std::endl; break; } } } ClusteringType clusteringType; InitializationMode initializationMode; size_t numberOfClusters; bool clusterSizeConstraint; size_t maximumIteration; float epsilonFrom; float epsilonTo; float epsilonStep; size_t resultSizeCoefficient; vector<double> diffHistory; }; }
utils.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <iostream> #include <vector> #include <string> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #ifdef _WIN32 #include <filesystem> #else #include <dirent.h> #include <sys/types.h> #endif namespace PaddleSolution { namespace utils { inline std::string path_join(const std::string& dir, const std::string& path) { std::string seperator = "/"; #ifdef _WIN32 seperator = "\\"; #endif return dir + seperator + path; } #ifndef _WIN32 // scan a directory and get all files with input extensions inline std::vector<std::string> get_directory_images( const std::string& path, const std::string& exts) { std::vector<std::string> imgs; struct dirent *entry; DIR *dir = opendir(path.c_str()); if (dir == NULL) { closedir(dir); return imgs; } while ((entry = readdir(dir)) != NULL) { std::string item = entry->d_name; auto ext = strrchr(entry->d_name, '.'); if (!ext || std::string(ext) == "." 
|| std::string(ext) == "..") { continue; } if (exts.find(ext) != std::string::npos) { imgs.push_back(path_join(path, entry->d_name)); } } return imgs; } #else // scan a directory and get all files with input extensions inline std::vector<std::string> get_directory_images( const std::string& path, const std::string& exts) { std::vector<std::string> imgs; for (const auto& item : std::experimental::filesystem::directory_iterator(path)) { auto suffix = item.path().extension().string(); if (exts.find(suffix) != std::string::npos && suffix.size() > 0) { auto fullname = path_join(path, item.path().filename().string()); imgs.push_back(item.path().string()); } } return imgs; } #endif // normalize and HWC_BGR -> CHW_RGB inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean, std::vector<float>& fstd) { int rh = im.rows; int rw = im.cols; int rc = im.channels(); double normf = static_cast<double>(1.0) / 255.0; #pragma omp parallel for for (int h = 0; h < rh; ++h) { const uchar* ptr = im.ptr<uchar>(h); int im_index = 0; for (int w = 0; w < rw; ++w) { for (int c = 0; c < rc; ++c) { int top_index = (c * rh + h) * rw + w; float pixel = static_cast<float>(ptr[im_index++]); pixel = (pixel * normf - fmean[c]) / fstd[c]; data[top_index] = pixel; } } } } // argmax inline void argmax(float* out, std::vector<int>& shape, std::vector<uchar>& mask, std::vector<uchar>& scoremap) { int out_img_len = shape[1] * shape[2]; int blob_out_len = out_img_len * shape[0]; /* Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]); Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0); */ float max_value = -1; int label = 0; #pragma omp parallel private(label) for (int i = 0; i < out_img_len; ++i) { max_value = -1; label = 0; #pragma omp for reduction(max : max_value) for (int j = 0; j < shape[0]; ++j) { int index = i + j * out_img_len; if (index >= blob_out_len) { continue; } float value = out[index]; if (value > max_value) { max_value = value; 
label = j; } } if (label == 0) max_value = 0; mask[i] = uchar(label); scoremap[i] = uchar(max_value * 255); } } } // namespace utils } // namespace PaddleSolution
tanh.c
#include <cdnn/activations.h>
#include <cdnn/model.h>
#include <math.h>

extern __Model__ * m;

/*
 * Forward pass of the tanh activation.
 * Reads the current layer's pre-activation cache
 * (m->current_layer->DENSE->cache) and returns a newly allocated dARRAY
 * of the same shape holding tanh(z). The caller owns the returned array.
 *
 * Fixed: the original computed (e^z - e^-z)/(e^z + e^-z) directly, which
 * overflows to inf/NaN for |z| greater than roughly 88 in float; tanhf()
 * is the numerically stable equivalent.
 */
dARRAY * forward_pass_tanh(){
  dARRAY * tanh_out = (dARRAY*)malloc(sizeof(dARRAY));
  int size = m->current_layer->DENSE->cache->shape[0] *
             m->current_layer->DENSE->cache->shape[1];
  tanh_out->matrix = (float*)calloc(size, sizeof(float));
  omp_set_num_threads(8);
  #pragma omp parallel for num_threads(8) shared(m,tanh_out) schedule(static)
  for(int i = 0; i < size; i++){
    tanh_out->matrix[i] = tanhf(m->current_layer->DENSE->cache->matrix[i]);
  }
  tanh_out->shape[0] = m->current_layer->DENSE->cache->shape[0];
  tanh_out->shape[1] = m->current_layer->DENSE->cache->shape[1];
  return tanh_out;
}

/*
 * Backward pass of the tanh activation.
 * Returns g'(z) = 1 - tanh(z)^2 evaluated element-wise on the layer's
 * activation input (m->current_layer->DENSE->A). Caller owns the result.
 */
dARRAY * backward_pass_tanh(){
  dARRAY * tanh_out = (dARRAY*)malloc(sizeof(dARRAY));
  int size = m->current_layer->DENSE->A->shape[0] *
             m->current_layer->DENSE->A->shape[1];
  tanh_out->matrix = (float*)calloc(size, sizeof(float));
  omp_set_num_threads(8);
  #pragma omp parallel for num_threads(8) shared(m,tanh_out) schedule(static)
  for(int i = 0; i < size; i++){
    float t = tanhf(m->current_layer->DENSE->A->matrix[i]);
    /* gradient of tanh: g'(z) = 1 - tanh(z)^2 (t*t avoids a pow() call) */
    tanh_out->matrix[i] = 1.0f - t * t;
  }
  tanh_out->shape[0] = m->current_layer->DENSE->A->shape[0];
  tanh_out->shape[1] = m->current_layer->DENSE->A->shape[1];
  return tanh_out;
}

/*
 * Allocate and populate a Tanh activation descriptor whose in/out
 * dimensions mirror the given linear output. Caller owns the result.
 */
Tanh * Tanh__init__(dARRAY * linear_matrix){
  Tanh * tanh = (Tanh*)malloc(sizeof(Tanh));
  tanh->forward = forward_pass_tanh;
  tanh->backward = backward_pass_tanh;
  tanh->in_dims[0] = tanh->out_dims[0] = linear_matrix->shape[0];
  tanh->in_dims[1] = tanh->out_dims[1] = linear_matrix->shape[1];
  return tanh;
}

/*
 * Dispatch tanh forward (args.status == 0) or backward (otherwise).
 *
 * Fixed: the original placed free(t) after the return statements, so it
 * was unreachable and every call leaked the Tanh descriptor. The result
 * is captured first so the descriptor can be released before returning.
 */
dARRAY * (TanH)(Tanh_args args){
  Tanh * t = Tanh__init__(args.input);
  dARRAY * result = (!args.status) ? t->forward() : t->backward();
  free(t);
  t = NULL;
  return result;
}
vednnLinearForward.c
#include "vednnLinearForward.h" #include "vednn-def.h" static inline vednnError_t vednnLinearForward_wrapper( vednnLinearForward_t pFunc, VEDNN_LINEARFWD_ARGS ) { #ifndef VEDNN_USE_OPENMP return pFunc(VEDNN_LINEARFWD_ARGS_LIST); #else if ( __vednn_omp_num_threads == 1 ) { return pFunc(VEDNN_LINEARFWD_ARGS_LIST); } else { vednnError_t rc = VEDNN_SUCCESS ; #pragma omp parallel reduction(|:rc) { uint64_t nthreads = omp_get_num_threads() ; uint64_t threadid = omp_get_thread_num() ; uint64_t nBatchEach = nBatch / nthreads ; uint64_t remain = nBatch % nthreads ; uint64_t batchBegin = nBatchEach * threadid + ( threadid < remain ? threadid : remain ) ; uint64_t myBatch = nBatchEach + ( threadid < remain ? 1 : 0 ) ; if( myBatch == 0 ) { rc |= VEDNN_SUCCESS ; } else { float* _pDataIn = ((float *)pDataIn) + batchBegin * inDim ; float* _pDataOut = ((float *)pDataOut) + batchBegin * outDim ; rc |= pFunc(inDim, outDim, myBatch, _pDataIn, pDataWeight, _pDataOut ) ; } } return rc ; } #endif } /* ----------------------------------------------------------------------- */ vednnError_t vednnLinearForward( VEDNN_LINEARFWD_ARGS ) { #define OMPWRAP( IMPL ) WRAP_RET(vednnLinearForward_##IMPL, \ vednnLinearForward_wrapper, VEDNN_LINEARFWD_ARGS_LIST) if( outDim <= 32 ) OMPWRAP(oU32); else { if( (outDim & 0x01) == 0 && (((uint64_t)pDataWeight) & 0x07) == 0 && (((uint64_t)pDataOut) & 0x07) == 0 ) OMPWRAP(o2X_woaligned); else OMPWRAP(default); } #undef OMPWRAP } // vim: et sw=2 ts=2
main.c
#include "./typos_v.h"
#include <omp.h>

const int BODY_COUNT = N4;

/* Euclidean length of a 3-vector. */
double magnitude(data3D data){
    double squareOfLength = 0.0;
    squareOfLength += data.x * data.x;
    squareOfLength += data.y * data.y;
    squareOfLength += data.z * data.z;
    return sqrt(squareOfLength);
}

/*
 * Scale the vector to unit length in place.
 *
 * Fixed: the original took data3D by value, so the caller never saw the
 * normalized result — every call was a no-op. Also guards against a
 * zero-length vector, which previously divided by zero.
 */
void normalize(data3D *data){
    double length = magnitude(*data);
    if (length == 0.0) {
        return;  /* leave the zero vector untouched instead of producing NaN */
    }
    data->x = data->x / length;
    data->y = data->y / length;
    data->z = data->z / length;
}

/*
 * Flip the sign of all three components in place.
 * Fixed: same pass-by-value bug as normalize() — the result was discarded.
 */
void invert(data3D *data) {
    data->x *= -1.0;
    data->y *= -1.0;
    data->z *= -1.0;
}

/*
 * Write the unit vector pointing from `fromVector` to `toVector` into
 * *resultVector.
 *
 * Fixed: the original took resultVector by value, so the computed
 * direction never reached the caller and every force direction ended up
 * as the zero vector.
 */
void direction(data3D fromVector, data3D toVector, data3D *resultVector) {
    resultVector->x = toVector.x - fromVector.x;
    resultVector->y = toVector.y - fromVector.y;
    resultVector->z = toVector.z - fromVector.z;
    normalize(resultVector);
}

/*
 * Magnitude of the Newtonian gravitational force between two point masses.
 * Returns 0 for coincident positions (self-interaction).
 */
double forceNewtonianGravity3D(double onMass, double becauseOfMass,
                               data3D onPosition, data3D becauseOfPosition) {
    double deltaX = becauseOfPosition.x - onPosition.x;
    double deltaY = becauseOfPosition.y - onPosition.y;
    double deltaZ = becauseOfPosition.z - onPosition.z;
    double distance = sqrt(deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ);
    if(distance == 0) {
        return 0;
    }
    return G * (onMass * becauseOfMass) / (distance * distance);
}

/* a = F / m (componentwise scalar form); zero force yields zero accel. */
double computeAccel(double mass, double force) {
    if( force == 0 ) {
        return 0;
    }
    return force / mass;
}

/* Explicit Euler step: v_new = v_old + a * dt. */
double computeVelo(double current, double previous, float deltaT) {
    return previous + (current * deltaT);
}

/* Explicit Euler step: x_new = x_old + v * dt. */
double computePos(double current, double previous, float deltaT) {
    return previous + (current * deltaT);
}

/* Componentwise a = F/m. */
data3D computeAccel3D(double mass, data3D force) {
    data3D anAccelVector = {0, 0, 0};
    anAccelVector.x = computeAccel(mass, force.x);
    anAccelVector.y = computeAccel(mass, force.y);
    anAccelVector.z = computeAccel(mass, force.z);
    return anAccelVector;
}

/* Componentwise Euler velocity update. */
data3D computeVelo3D(data3D accel, data3D prevVelo, float deltaT) {
    data3D aVeloVector = {0, 0, 0};
    aVeloVector.x = computeVelo( accel.x, prevVelo.x, deltaT );
    aVeloVector.y = computeVelo( accel.y, prevVelo.y, deltaT );
    aVeloVector.z = computeVelo( accel.z, prevVelo.z, deltaT );
    return aVeloVector;
}

/* Componentwise Euler position update. */
data3D computePos3D(data3D velo, data3D prevPos, float deltaT) {
    data3D aPositionVector = {0, 0, 0};
    aPositionVector.x = computePos(velo.x, prevPos.x, deltaT);
    aPositionVector.y = computePos(velo.y, prevPos.y, deltaT);
    aPositionVector.z = computePos(velo.z, prevPos.z, deltaT);
    return aPositionVector;
}

/* Sum gravitational forces from all other bodies and store the resulting
 * acceleration for the given body. Reads position[] and mass[] globals. */
void updateAcceleration(int bodyIndex){
    data3D netForce = { 0, 0, 0 };
    for(int i = 0; i < BODY_COUNT; i++){
        if (i == bodyIndex) {
            continue;  /* no self-gravity; also avoids a zero-length direction */
        }
        data3D vectorForceToOther = {0, 0, 0};
        double scalarForceBetween = forceNewtonianGravity3D(
            mass[bodyIndex], mass[i], position[bodyIndex], position[i]);
        direction(position[bodyIndex], position[i], &vectorForceToOther);
        vectorForceToOther.x *= scalarForceBetween;
        vectorForceToOther.y *= scalarForceBetween;
        vectorForceToOther.z *= scalarForceBetween;
        netForce.x += vectorForceToOther.x;
        netForce.y += vectorForceToOther.y;
        netForce.z += vectorForceToOther.z;
    }
    acceleration[bodyIndex] = computeAccel3D(mass[bodyIndex], netForce);
}

/* Integrate velocity for one body over deltaT. */
void updateVelocity(int bodyIndex, float deltaT){
    speed[bodyIndex] = computeVelo3D(acceleration[bodyIndex],
                                     speed[bodyIndex], deltaT);
}

/* Integrate position for one body over deltaT. */
void updatePosition(int bodyIndex, float deltaT){
    position[bodyIndex] = computePos3D(speed[bodyIndex],
                                       position[bodyIndex], deltaT);
}

/* Dump mass, position, velocity and acceleration of every body. */
void displayAll(){
    int index;
    for(index=0; index < BODY_COUNT; index++) {
        printf("\nBody %d:\nMassa: %f\nPosiçao(x ,y, z): %f, %f, %f\nVelocidade(x, y, z): %f, %f, %f\nAceleracao(x ,y, z): %f, %f, %f\n\n", index + 1, mass[index], position[index].x, position[index].y, position[index].z, speed[index].x, speed[index].y, speed[index].z, acceleration[index].x, acceleration[index].y, acceleration[index].z);
    }
}

/*
 * Advance the whole system by one step of size deltaT.
 *
 * Fixed: the original fused acceleration, velocity and position updates in
 * a single parallel loop, so one thread read position[] while another was
 * already writing it — a data race that made results nondeterministic.
 * Phase 1 now computes all accelerations from a consistent snapshot of
 * positions; phase 2 integrates velocities and positions.
 */
void updatePhysics(float deltaT){
    int i;
    #pragma omp parallel for num_threads(8)
    for(i = 0; i < BODY_COUNT; i++) {
        updateAcceleration(i);
    }
    #pragma omp parallel for num_threads(8)
    for(i = 0; i < BODY_COUNT; i++) {
        updateVelocity(i, deltaT);
        updatePosition(i, deltaT);
    }
}

/* Run the simulation loop (10000 steps with a growing step size, as in the
 * original; presumably deltaT = i*100 is intentional — TODO confirm). */
void nBodyStart(){
    int j = 0;
    int i = 0;
    for (j = 0; j < 1; ++j){
        for (i = 0; i < 10000; ++i){
            updatePhysics(i * 100);
        }
    }
    //displayAll();
}

int main(){
    nBodyStart();
    return 0;
}
lu.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU This benchmark is an OpenMP C version of the NPB LU code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: S. Weeratunga V. Venkatakrishnan E. Barszcz M. Yarrow OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" /* global variables */ #include "applu.h" //#if defined(_OPENMP) /* for thread synchronization */ //static boolean flag[ISIZ1/2*2+1]; //#endif /* _OPENMP */ /* function declarations */ static void blts (int nx, int ny, int nz, int k, double omega, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double ldz[ISIZ1][ISIZ2][5][5], double ldy[ISIZ1][ISIZ2][5][5], double ldx[ISIZ1][ISIZ2][5][5], double d[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ); static void buts(int nx, int ny, int nz, int k, double omega, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double tv[ISIZ1][ISIZ2][5], double d[ISIZ1][ISIZ2][5][5], double udx[ISIZ1][ISIZ2][5][5], double udy[ISIZ1][ISIZ2][5][5], double udz[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ); static void domain(void); static void erhs(void); static void error(void); static void exact( int i, 
int j, int k, double u000ijk[5] ); static void jacld(int k); static void jacu(int k); static void l2norm (int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double sum[5]); static void pintgr(void); static void read_input(void); static void rhs(void); static void setbv(void); static void setcoeff(void); static void setiv(void); static void ssor(void); static void verify(double xcr[5], double xce[5], double xci, char *class, boolean *verified); /*-------------------------------------------------------------------- program applu --------------------------------------------------------------------*/ int main(int argc, char **argv) { /*-------------------------------------------------------------------- c c driver for the performance evaluation of the solver for c five coupled parabolic/elliptic partial differential equations. c --------------------------------------------------------------------*/ char class; boolean verified; double mflops; int nthreads = 1; /*-------------------------------------------------------------------- c read input data --------------------------------------------------------------------*/ read_input(); /*-------------------------------------------------------------------- c set up domain sizes --------------------------------------------------------------------*/ domain(); /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ setcoeff(); /*-------------------------------------------------------------------- c set the boundary values for dependent variables --------------------------------------------------------------------*/ setbv(); /*-------------------------------------------------------------------- c set the initial values for dependent variables --------------------------------------------------------------------*/ setiv(); 
/*-------------------------------------------------------------------- c compute the forcing term based on prescribed exact solution --------------------------------------------------------------------*/ erhs(); { //#if defined(_OPENMP) // nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ } /*-------------------------------------------------------------------- c perform the SSOR iterations --------------------------------------------------------------------*/ ssor(); /*-------------------------------------------------------------------- c compute the solution error --------------------------------------------------------------------*/ error(); /*-------------------------------------------------------------------- c compute the surface integral --------------------------------------------------------------------*/ pintgr(); /*-------------------------------------------------------------------- c verification test --------------------------------------------------------------------*/ verify ( rsdnm, errnm, frc, &class, &verified ); mflops = (double)itmax*(1984.77*(double)nx0 *(double)ny0 *(double)nz0 -10923.3*pow2((double)( nx0+ny0+nz0 )/3.0) +27770.9* (double)( nx0+ny0+nz0 )/3.0 -144010.0) / (maxtime*1000000.0); c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, "(none)"); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void blts (int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double ldz[ISIZ1][ISIZ2][5][5], double ldy[ISIZ1][ISIZ2][5][5], double ldx[ISIZ1][ISIZ2][5][5], double d[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for for (j = jst; j <= jend; j++) { #pragma omp parallel for for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0] + ldz[i][j][m][1] * v[i][j][k-1][1] + ldz[i][j][m][2] * v[i][j][k-1][2] + ldz[i][j][m][3] * v[i][j][k-1][3] + ldz[i][j][m][4] * v[i][j][k-1][4] ); } } } for (i = ist; i <= iend; i++) { //#if defined(_OPENMP) // if (i != ist) { // while (flag[i-1] == 0) { // ; // } // } // if (i != iend) { // while (flag[i] == 1) { // ; // } // } //#endif /* _OPENMP */ for (j = jst; j <= jend; j++) { for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldy[i][j][m][0] * v[i][j-1][k][0] + ldx[i][j][m][0] * v[i-1][j][k][0] + ldy[i][j][m][1] * v[i][j-1][k][1] + ldx[i][j][m][1] * v[i-1][j][k][1] + ldy[i][j][m][2] * v[i][j-1][k][2] + ldx[i][j][m][2] * v[i-1][j][k][2] + ldy[i][j][m][3] * v[i][j-1][k][3] + ldx[i][j][m][3] * v[i-1][j][k][3] + ldy[i][j][m][4] * v[i][j-1][k][4] + ldx[i][j][m][4] * v[i-1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ for 
(m = 0; m < 5; m++) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[ 1][1]; tmp = tmp1 * tmat[ 2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; 
v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } //#if defined(_OPENMP) // if (i != ist) flag[i-1] = 0; // if (i != iend) flag[i] = 1; //#endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double tv[ISIZ1][ISIZ2][5], double d[ISIZ1][ISIZ2][5][5], double udx[ISIZ1][ISIZ2][5][5], double udy[ISIZ1][ISIZ2][5][5], double udz[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp parallel for for (i = iend; i >= ist; i--) { #pragma omp parallel for for (j = jend; j >= jst; j--) { #pragma omp parallel for for (m = 0; m < 5; m++) { tv[i][j][m] = omega * ( udz[i][j][m][0] * v[i][j][k+1][0] + udz[i][j][m][1] * v[i][j][k+1][1] + udz[i][j][m][2] * v[i][j][k+1][2] + udz[i][j][m][3] * v[i][j][k+1][3] + udz[i][j][m][4] * v[i][j][k+1][4] ); } } } for (i = iend; i >= ist; i--) { //#if defined(_OPENMP) // if (i != iend) { // while (flag[i+1] == 0) { // ; // } //// } // if (i != ist) { // while (flag[i] == 1) { // ; // } // } //#endif /* _OPENMP */ for (j = jend; j >= jst; j--) { for (m = 0; m < 5; m++) { tv[i][j][m] = tv[i][j][m] + omega * ( udy[i][j][m][0] * v[i][j+1][k][0] + udx[i][j][m][0] * v[i+1][j][k][0] + udy[i][j][m][1] * v[i][j+1][k][1] + udx[i][j][m][1] * v[i+1][j][k][1] + udy[i][j][m][2] * v[i][j+1][k][2] + udx[i][j][m][2] * v[i+1][j][k][2] + udy[i][j][m][3] * v[i][j+1][k][3] + udx[i][j][m][3] * v[i+1][j][k][3] + udy[i][j][m][4] * v[i][j+1][k][4] + udx[i][j][m][4] * v[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ for (m = 0; m < 5; 
m++) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 
/* NOTE(review): this chunk opens mid-statement -- the left-hand side of the
   first assignment lies in the previous chunk.  What follows is the tail of a
   blocked 5x5 Gaussian elimination: finish eliminating rows 3..4, then
   back-substitute and subtract the correction tv from the solution array v. */
* tmat[4][2];
        tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
        tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
        tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;

        tmp1 = 1.0 / tmat[3][3];
        tmp = tmp1 * tmat[4][3];
        tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
        tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;

/*--------------------------------------------------------------------
c   back substitution
--------------------------------------------------------------------*/
        tv[i][j][4] = tv[i][j][4] / tmat[4][4];
        tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
        tv[i][j][3] = tv[i][j][3] / tmat[3][3];
        tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4];
        tv[i][j][2] = tv[i][j][2] / tmat[2][2];
        tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4];
        tv[i][j][1] = tv[i][j][1] / tmat[1][1];
        tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4];
        tv[i][j][0] = tv[i][j][0] / tmat[0][0];

        /* apply the computed correction to the solution vector */
        v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
        v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
        v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
        v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
        v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
      }
//#if defined(_OPENMP)
//      if (i != iend) flag[i+1] = 0;
//      if (i != ist) flag[i] = 1;
//#endif /* _OPENMP */
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Copy the global problem size into the sub-domain extents (nx, ny, nz),
   validate it against the supported range, and fix the interior index
   bounds ist..iend and jst..jend used by the solver sweeps.
   Exits the program when the grid is too small or exceeds the compiled
   ISIZ1/ISIZ2/ISIZ3 array dimensions. */
static void domain(void) {
/*--------------------------------------------------------------------
c  local variables (none -- all state is file-scope)
--------------------------------------------------------------------*/
  nx = nx0;
  ny = ny0;
  nz = nz0;

/*--------------------------------------------------------------------
c   check the sub-domain size
--------------------------------------------------------------------*/
  if ( nx < 4 || ny < 4 || nz < 4 ) {
    printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
	   " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
	   " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
	   " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
    exit(1);
  }

  if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
    printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
	   " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
	   " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
	   " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
	   " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
    exit(1);
  }

/*--------------------------------------------------------------------
c   set up the start and end in i and j extents for all processors
--------------------------------------------------------------------*/
  ist = 1;
  iend = nx - 2;
  jst = 1;
  jend = ny - 2;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
  {
/*--------------------------------------------------------------------
c
c   compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
    int i, j, k, m;
    int iglob, jglob;
    int L1, L2;
    int ist1, iend1;
    int jst1, jend1;
    double dsspm;
    double xi, eta, zeta;
    double q;
    double u21, u31, u41;
    double tmp;
    double u21i, u31i, u41i, u51i;
    double u21j, u31j, u41j, u51j;
    double u21k, u31k, u41k, u51k;
    double u21im1, u31im1, u41im1, u51im1;
    double u21jm1, u31jm1, u41jm1, u51jm1;
    double u21km1, u31km1, u41km1, u51km1;

    dsspm = dssp;

    /* zero the forcing term frct over the whole grid.
       NOTE(review): the "#pragma omp parallel for" on every nesting level
       creates nested parallel regions -- preserved exactly as found. */
#pragma omp parallel for
    for (i = 0; i < nx; i++) {
#pragma omp parallel for
      for (j = 0; j < ny; j++) {
#pragma omp parallel for
        for (k = 0; k < nz; k++) {
#pragma omp parallel for
          for (m = 0; m < 5; m++) {
            frct[i][j][k][m] = 0.0;
          }
        }
      }
    }

    /* evaluate the exact-solution polynomial (coefficients ce) into rsd
       at every grid point, using normalized coordinates xi, eta, zeta */
#pragma omp parallel for private(iglob, xi)
    for (i = 0; i < nx; i++) {
      iglob = i;
      xi = ( (double)(iglob) ) / ( nx0 - 1 );
#pragma omp parallel for private(jglob, eta)
      for (j = 0; j < ny; j++) {
        jglob = j;
        eta = ( (double)(jglob) ) / ( ny0 - 1 );
#pragma omp parallel for private(zeta)
        for (k = 0; k < nz; k++) {
          zeta = ( (double)(k) ) / ( nz - 1 );
          for (m = 0; m < 5; m++) {
            rsd[i][j][k][m] = ce[m][0]
              + ce[m][1] * xi
              + ce[m][2] * eta
              + ce[m][3] * zeta
              + ce[m][4] * xi * xi
              + ce[m][5] * eta * eta
              + ce[m][6] * zeta * zeta
              + ce[m][7] * xi * xi * xi
              + ce[m][8] * eta * eta * eta
              + ce[m][9] * zeta * zeta * zeta
              + ce[m][10] * xi * xi * xi * xi
              + ce[m][11] * eta * eta * eta * eta
              + ce[m][12] * zeta * zeta * zeta * zeta;
          }
        }
      }
    }

/*--------------------------------------------------------------------
c   xi-direction flux differences
--------------------------------------------------------------------*/
    L1 = 0;
    L2 = nx-1;

    /* convective fluxes in x at every point of the i-extended range */
#pragma omp parallel for
    for (i = L1; i <= L2; i++) {
#pragma omp parallel for
      for (j = jst; j <= jend; j++) {
#pragma omp parallel for private(u21, q)
        for (k = 1; k < nz - 1; k++) {
          flux[i][j][k][0] = rsd[i][j][k][1];
          u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
          q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
                     + rsd[i][j][k][2] * rsd[i][j][k][2]
                     + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0];
          flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * ( rsd[i][j][k][4] - q );
          flux[i][j][k][2] = rsd[i][j][k][2] * u21;
          flux[i][j][k][3] = rsd[i][j][k][3] * u21;
          flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
        }
      }
    }

#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)
    for (j = jst; j <= jend; j++) {
#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)
      for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, i, ist1, iend1, m) firstprivate(jst, ist)
        for (i = ist; i <= iend; i++) {
#pragma omp parallel for
          for (m = 0; m < 5; m++) {
            /* second-order flux difference in x (continues in next chunk) */
            frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (
/* (continuation of the xi-direction flux difference started in the
   previous chunk) */
              flux[i+1][j][k][m] - flux[i-1][j][k][m] );
          }
        }

        /* viscous fluxes in x from one-sided velocity differences */
#pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1) firstprivate(ist)
        for (i = ist; i <= L2; i++) {
          tmp = 1.0 / rsd[i][j][k][0];
          u21i = tmp * rsd[i][j][k][1];
          u31i = tmp * rsd[i][j][k][2];
          u41i = tmp * rsd[i][j][k][3];
          u51i = tmp * rsd[i][j][k][4];
          tmp = 1.0 / rsd[i-1][j][k][0];
          u21im1 = tmp * rsd[i-1][j][k][1];
          u31im1 = tmp * rsd[i-1][j][k][2];
          u41im1 = tmp * rsd[i-1][j][k][3];
          u51im1 = tmp * rsd[i-1][j][k][4];

          flux[i][j][k][1] = (4.0/3.0) * tx3 * ( u21i - u21im1 );
          flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
          flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
          flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
            * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
                    - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
            + (1.0/6.0)
            * tx3 * ( u21i*u21i - u21im1*u21im1 )
            + C1 * C5 * tx3 * ( u51i - u51im1 );
        }

        /* accumulate viscous and second-difference terms into frct */
#pragma omp parallel for
        for (i = ist; i <= iend; i++) {
          frct[i][j][k][0] = frct[i][j][k][0]
            + dx1 * tx1 * ( rsd[i-1][j][k][0]
                          - 2.0 * rsd[i][j][k][0]
                          + rsd[i+1][j][k][0] );
          frct[i][j][k][1] = frct[i][j][k][1]
            + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
            + dx2 * tx1 * ( rsd[i-1][j][k][1]
                          - 2.0 * rsd[i][j][k][1]
                          + rsd[i+1][j][k][1] );
          frct[i][j][k][2] = frct[i][j][k][2]
            + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
            + dx3 * tx1 * ( rsd[i-1][j][k][2]
                          - 2.0 * rsd[i][j][k][2]
                          + rsd[i+1][j][k][2] );
          frct[i][j][k][3] = frct[i][j][k][3]
            + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
            + dx4 * tx1 * ( rsd[i-1][j][k][3]
                          - 2.0 * rsd[i][j][k][3]
                          + rsd[i+1][j][k][3] );
          frct[i][j][k][4] = frct[i][j][k][4]
            + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
            + dx5 * tx1 * ( rsd[i-1][j][k][4]
                          - 2.0 * rsd[i][j][k][4]
                          + rsd[i+1][j][k][4] );
        }

/*--------------------------------------------------------------------
c   Fourth-order dissipation in x: special one-sided stencils at the
c   boundaries (i = 1, 2 and i = nx-3, nx-2), central stencil inside
--------------------------------------------------------------------*/
        for (m = 0; m < 5; m++) {
          frct[1][j][k][m] = frct[1][j][k][m]
            - dsspm * ( + 5.0 * rsd[1][j][k][m]
                        - 4.0 * rsd[2][j][k][m]
                        + rsd[3][j][k][m] );
          frct[2][j][k][m] = frct[2][j][k][m]
            - dsspm * ( - 4.0 * rsd[1][j][k][m]
                        + 6.0 * rsd[2][j][k][m]
                        - 4.0 * rsd[3][j][k][m]
                        + rsd[4][j][k][m] );
        }

        ist1 = 3;
        iend1 = nx - 4;
        for (i = ist1; i <=iend1; i++) {
          for (m = 0; m < 5; m++) {
            frct[i][j][k][m] = frct[i][j][k][m]
              - dsspm * ( rsd[i-2][j][k][m]
                        - 4.0 * rsd[i-1][j][k][m]
                        + 6.0 * rsd[i][j][k][m]
                        - 4.0 * rsd[i+1][j][k][m]
                        + rsd[i+2][j][k][m] );
          }
        }

        for (m = 0; m < 5; m++) {
          frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
            - dsspm * ( rsd[nx-5][j][k][m]
                      - 4.0 * rsd[nx-4][j][k][m]
                      + 6.0 * rsd[nx-3][j][k][m]
                      - 4.0 * rsd[nx-2][j][k][m] );
          frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
            - dsspm * ( rsd[nx-4][j][k][m]
                      - 4.0 * rsd[nx-3][j][k][m]
                      + 5.0 * rsd[nx-2][j][k][m] );
        /* NOTE(review): brace counts below are preserved exactly as in the
           source text; overall balance cannot be verified from this chunk */
        }
      }

/*--------------------------------------------------------------------
c   eta-direction flux differences
--------------------------------------------------------------------*/
    L1 = 0;
    L2 = ny-1;

    /* convective fluxes in y */
#pragma omp parallel for private(u31, q)
    for (i = ist; i <= iend; i++) {
#pragma omp parallel for private(u31, q)
//firstprivate(iend ,ist ,k ,ny ,u31 ,q ,nz ,L2 ,i )
      for (j = L1; j <= L2; j++) {
#pragma omp parallel for private(u31, q)
        for (k = 1; k <= nz - 2; k++) {
          flux[i][j][k][0] = rsd[i][j][k][2];
          u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
          q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
                     + rsd[i][j][k][2] * rsd[i][j][k][2]
                     + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0];
          flux[i][j][k][1] = rsd[i][j][k][1] * u31;
          flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * ( rsd[i][j][k][4] - q );
          flux[i][j][k][3] = rsd[i][j][k][3] * u31;
          flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
        }
      }
    }

#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)
    for (i = ist; i <= iend; i++) {
#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)
      for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for
        for
        (j = jst; j <= jend; j++) {
#pragma omp parallel for
          for (m = 0; m < 5; m++) {
            /* second-order flux difference in y */
            frct[i][j][k][m] = frct[i][j][k][m]
              - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
          }
        }

        /* viscous fluxes in y */
#pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1)
        for (j = jst; j <= L2; j++) {
          tmp = 1.0 / rsd[i][j][k][0];
          u21j = tmp * rsd[i][j][k][1];
          u31j = tmp * rsd[i][j][k][2];
          u41j = tmp * rsd[i][j][k][3];
          u51j = tmp * rsd[i][j][k][4];
          tmp = 1.0 / rsd[i][j-1][k][0];
          u21jm1 = tmp * rsd[i][j-1][k][1];
          u31jm1 = tmp * rsd[i][j-1][k][2];
          u41jm1 = tmp * rsd[i][j-1][k][3];
          u51jm1 = tmp * rsd[i][j-1][k][4];

          flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
          flux[i][j][k][2] = (4.0/3.0) * ty3 * ( u31j - u31jm1 );
          flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
          flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
            * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
                    - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
            + (1.0/6.0)
            * ty3 * ( u31j*u31j - u31jm1*u31jm1 )
            + C1 * C5 * ty3 * ( u51j - u51jm1 );
        }

        /* accumulate viscous and second-difference terms into frct */
#pragma omp parallel for
        for (j = jst; j <= jend; j++) {
          frct[i][j][k][0] = frct[i][j][k][0]
            + dy1 * ty1 * ( rsd[i][j-1][k][0]
                          - 2.0 * rsd[i][j][k][0]
                          + rsd[i][j+1][k][0] );
          frct[i][j][k][1] = frct[i][j][k][1]
            + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
            + dy2 * ty1 * ( rsd[i][j-1][k][1]
                          - 2.0 * rsd[i][j][k][1]
                          + rsd[i][j+1][k][1] );
          frct[i][j][k][2] = frct[i][j][k][2]
            + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
            + dy3 * ty1 * ( rsd[i][j-1][k][2]
                          - 2.0 * rsd[i][j][k][2]
                          + rsd[i][j+1][k][2] );
          frct[i][j][k][3] = frct[i][j][k][3]
            + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
            + dy4 * ty1 * ( rsd[i][j-1][k][3]
                          - 2.0 * rsd[i][j][k][3]
                          + rsd[i][j+1][k][3] );
          frct[i][j][k][4] = frct[i][j][k][4]
            + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
            + dy5 * ty1 * ( rsd[i][j-1][k][4]
                          - 2.0 * rsd[i][j][k][4]
                          + rsd[i][j+1][k][4] );
        }

/*--------------------------------------------------------------------
c   fourth-order dissipation
--------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * ( + 5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m] ); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * ( - 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m] ); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j-2][k][m] - 4.0 * rsd[i][j-1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j+1][k][m] + rsd[i][j+2][k][m] ); } } for (m = 0; m < 5; m++) { frct[i][ny-3][k][m] = frct[i][ny-3][k][m] - dsspm * ( rsd[i][ny-5][k][m] - 4.0 * rsd[i][ny-4][k][m] + 6.0 * rsd[i][ny-3][k][m] - 4.0 * rsd[i][ny-2][k][m] ); frct[i][ny-2][k][m] = frct[i][ny-2][k][m] - dsspm * ( rsd[i][ny-4][k][m] - 4.0 * rsd[i][ny-3][k][m] + 5.0 * rsd[i][ny-2][k][m] ); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private(u41, q) for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #pragma omp parallel for private(u41, q) for (k = 0; k <= nz-1; k++) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41; } #pragma omp parallel for for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] ); } } #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, 
u41km1, u51km1) for (k = 1; k <= nz-1; k++) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k-1][0]; u21km1 = tmp * rsd[i][j][k-1][1]; u31km1 = tmp * rsd[i][j][k-1][2]; u41km1 = tmp * rsd[i][j][k-1][3]; u51km1 = tmp * rsd[i][j][k-1][4]; flux[i][j][k][1] = tz3 * ( u21k - u21km1 ); flux[i][j][k][2] = tz3 * ( u31k - u31km1 ); flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k - u41km1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k ) - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) ) + (1.0/6.0) * tz3 * ( u41k*u41k - u41km1*u41km1 ) + C1 * C5 * tz3 * ( u51k - u51km1 ); } #pragma omp parallel for for (k = 1; k <= nz - 2; k++) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * ( rsd[i][j][k+1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k-1][0] ); frct[i][j][k][1] = frct[i][j][k][1] + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] ) + dz2 * tz1 * ( rsd[i][j][k+1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k-1][1] ); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] ) + dz3 * tz1 * ( rsd[i][j][k+1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k-1][2] ); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] ) + dz4 * tz1 * ( rsd[i][j][k+1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k-1][3] ); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] ) + dz5 * tz1 * ( rsd[i][j][k+1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k-1][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * ( + 5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m] ); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * 
rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m] ); } #pragma omp parallel for (k = 3; k <= nz - 4; k++) { #pragma omp parallel for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j][k-2][m] - 4.0 * rsd[i][j][k-1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k+1][m] + rsd[i][j][k+2][m] ); } } for (m = 0; m < 5; m++) { frct[i][j][nz-3][m] = frct[i][j][nz-3][m] - dsspm * ( rsd[i][j][nz-5][m] - 4.0 * rsd[i][j][nz-4][m] + 6.0 * rsd[i][j][nz-3][m] - 4.0 * rsd[i][j][nz-2][m] ); frct[i][j][nz-2][m] = frct[i][j][nz-2][m] - dsspm * ( rsd[i][j][nz-4][m] - 4.0 * rsd[i][j][nz-3][m] + 5.0 * rsd[i][j][nz-2][m] ); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error(void) { /*-------------------------------------------------------------------- c c compute the solution error c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k, m; int iglob, jglob; double tmp; double u000ijk[5]; #pragma omp parallel for for (m = 0; m < 5; m++) { errnm[m] = 0.0; } for (i = ist; i <= iend; i++) { iglob = i; for (j = jst; j <= jend; j++) { jglob = j; for (k = 1; k <= nz-2; k++) { exact( iglob, jglob, k, u000ijk ); #pragma omp parallel for private(tmp) for (m = 0; m < 5; m++) { tmp = ( u000ijk[m] - u[i][j][k][m] ); errnm[m] = errnm[m] + tmp *tmp; } } } } #pragma omp parallel for for (m = 0; m < 5; m++) { errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) ); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact( int i, int j, int k, double u000ijk[5] ) { /*-------------------------------------------------------------------- 
c c compute the exact solution at (i,j,k) c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int m; double xi, eta, zeta; xi = ((double)i) / (nx0 - 1); eta = ((double)j) / (ny0 - 1); zeta = ((double)k) / (nz - 1); #pragma omp parallel for for (m = 0; m < 5; m++) { u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacld(int k) { /*-------------------------------------------------------------------- c compute the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j; double r43; double c1345; double c34; double tmp1, tmp2, tmp3; r43 = ( 4.0 / 3.0 ); c1345 = C1 * C3 * C4 * C5; c34 = C3 * C4; #pragma omp for private(tmp1, tmp2, tmp3) for (i = ist; i <= iend; i++) { #pragma omp parallel for private(tmp1, tmp2, tmp3) for (j = jst; j <= jend; j++) { /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 ); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 
        0.0;
      d[i][j][1][0] = dt * 2.0
        * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
          + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
          + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
      d[i][j][1][1] = 1.0
        + dt * 2.0 * ( tx1 * r43 * c34 * tmp1
                     + ty1 * c34 * tmp1
                     + tz1 * c34 * tmp1 )
        + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 );
      d[i][j][1][2] = 0.0;
      d[i][j][1][3] = 0.0;
      d[i][j][1][4] = 0.0;
      d[i][j][2][0] = dt * 2.0
        * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
          + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
          + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
      d[i][j][2][1] = 0.0;
      d[i][j][2][2] = 1.0
        + dt * 2.0 * ( tx1 * c34 * tmp1
                     + ty1 * r43 * c34 * tmp1
                     + tz1 * c34 * tmp1 )
        + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 );
      d[i][j][2][3] = 0.0;
      d[i][j][2][4] = 0.0;
      d[i][j][3][0] = dt * 2.0
        * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
          + ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
          + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
      d[i][j][3][1] = 0.0;
      d[i][j][3][2] = 0.0;
      d[i][j][3][3] = 1.0
        + dt * 2.0 * ( tx1 * c34 * tmp1
                     + ty1 * c34 * tmp1
                     + tz1 * r43 * c34 * tmp1 )
        + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 );
      d[i][j][3][4] = 0.0;
      /* energy-equation row of the block diagonal */
      d[i][j][4][0] = dt * 2.0
        * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
                    - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
                    - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
                    - ( c1345 ) * tmp2 * u[i][j][k][4] )
          + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
                    - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
                    - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
                    - ( c1345 ) * tmp2 * u[i][j][k][4] )
          + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
                    - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
                    - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
                    - ( c1345 ) * tmp2 * u[i][j][k][4] ) );
      d[i][j][4][1] = dt * 2.0
        * ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
          + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
          + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
      d[i][j][4][2] = dt * 2.0
        * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
          + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
          + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
      d[i][j][4][3] = dt * 2.0
        * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
          + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
          + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
      d[i][j][4][4] = 1.0
        + dt * 2.0 * ( tx1 * c1345 * tmp1
                     + ty1 * c1345 * tmp1
                     + tz1 * c1345 * tmp1 )
        + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 );

/*--------------------------------------------------------------------
c   form the first block sub-diagonal (z-direction: neighbor k-1)
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i][j][k-1][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;

      a[i][j][0][0] = - dt * tz1 * dz1;
      a[i][j][0][1] = 0.0;
      a[i][j][0][2] = 0.0;
      a[i][j][0][3] = - dt * tz2;
      a[i][j][0][4] = 0.0;
      a[i][j][1][0] = - dt * tz2
        * ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
        - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
      a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
        - dt * tz1 * c34 * tmp1
        - dt * tz1 * dz2 ;
      a[i][j][1][2] = 0.0;
      a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
      a[i][j][1][4] = 0.0;
      a[i][j][2][0] = - dt * tz2
        * ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
        - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
      a[i][j][2][1] = 0.0;
      a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
        - dt * tz1 * ( c34 * tmp1 )
        - dt * tz1 * dz3;
      a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
      a[i][j][2][4] = 0.0;
      a[i][j][3][0] = - dt * tz2
        * ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
            + 0.50 * C2
            * ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
                + u[i][j][k-1][2] * u[i][j][k-1][2]
                + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
        - dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
      a[i][j][3][1] = - dt * tz2
        * ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
      a[i][j][3][2] = - dt * tz2
        * ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
      a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
        * ( u[i][j][k-1][3] * tmp1 )
        - dt * tz1 * ( r43 * c34 * tmp1 )
        - dt * tz1 * dz4;
      a[i][j][3][4] = - dt * tz2 * C2;
      a[i][j][4][0] = - dt * tz2
        * ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
                   + u[i][j][k-1][2] * u[i][j][k-1][2]
                   + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
            - C1 * ( u[i][j][k-1][4] * tmp1 ) )
          * ( u[i][j][k-1][3] * tmp1 ) )
        - dt * tz1
        * ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
            - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
            - ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
            - c1345 * tmp2 * u[i][j][k-1][4] );
      a[i][j][4][1] = - dt * tz2
        * ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
        - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
      a[i][j][4][2] = - dt * tz2
        * ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
        - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
      a[i][j][4][3] = - dt * tz2
        * ( C1 * ( u[i][j][k-1][4] * tmp1 )
          - 0.50 * C2
          * ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
              + u[i][j][k-1][2]*u[i][j][k-1][2]
              + 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
        - dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
      a[i][j][4][4] = - dt * tz2 * ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
        - dt * tz1 * c1345 * tmp1
        - dt * tz1 * dz5;

/*--------------------------------------------------------------------
c   form the second block sub-diagonal (y-direction: neighbor j-1)
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i][j-1][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;

      b[i][j][0][0] = - dt * ty1 * dy1;
      b[i][j][0][1] = 0.0;
      b[i][j][0][2] = - dt * ty2;
      b[i][j][0][3] = 0.0;
      b[i][j][0][4] = 0.0;
      b[i][j][1][0] = - dt * ty2
        * ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
        - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
      b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
        - dt * ty1 * ( c34 * tmp1 )
        - dt * ty1 * dy2;
      b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
      b[i][j][1][3] = 0.0;
      b[i][j][1][4] = 0.0;
      b[i][j][2][0] = - dt * ty2
        * ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
            + 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
                            + u[i][j-1][k][2] * u[i][j-1][k][2]
                            + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2 ) )
        - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
      b[i][j][2][1] = - dt * ty2
        * ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
      b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
        * ( u[i][j-1][k][2] * tmp1 ) )
        - dt * ty1 * ( r43 * c34 * tmp1 )
        - dt * ty1 * dy3;
      b[i][j][2][3] = - dt * ty2
        * ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
      b[i][j][2][4] = - dt * ty2 * C2;
      b[i][j][3][0] = - dt * ty2
        * ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
        - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
      b[i][j][3][1] = 0.0;
      b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
      b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
        - dt * ty1 * ( c34 * tmp1 )
        - dt * ty1 * dy4;
      b[i][j][3][4] = 0.0;
      b[i][j][4][0] = - dt * ty2
        * ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
                   + u[i][j-1][k][2] * u[i][j-1][k][2]
                   + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
            - C1 * ( u[i][j-1][k][4] * tmp1 ) )
          * ( u[i][j-1][k][2] * tmp1 ) )
        - dt * ty1
        * ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
            - ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
            - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
            - c1345*tmp2*u[i][j-1][k][4] );
      b[i][j][4][1] = - dt * ty2
        * ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
        - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
      b[i][j][4][2] = - dt * ty2
        * ( C1 * ( u[i][j-1][k][4] * tmp1 )
          - 0.50 * C2
          * ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
              + 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
              + u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
        - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
      b[i][j][4][3] = - dt * ty2
        * ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
        - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
      b[i][j][4][4] = - dt * ty2 * ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
        - dt * ty1 * c1345 * tmp1
        - dt * ty1 * dy5;

/*--------------------------------------------------------------------
c   form the third block sub-diagonal (x-direction: neighbor i-1)
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i-1][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;

      c[i][j][0][0] = - dt * tx1 * dx1;
      c[i][j][0][1] = - dt * tx2;
      c[i][j][0][2] = 0.0;
      c[i][j][0][3] = 0.0;
      c[i][j][0][4] = 0.0;
      c[i][j][1][0] = - dt * tx2
        * ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
            + C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
                          + u[i-1][j][k][2] * u[i-1][j][k][2]
                          + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
        - dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
      c[i][j][1][1] = - dt * tx2
        * ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
        - dt * tx1 * ( r43 * c34 * tmp1 )
        - dt * tx1 * dx2;
      c[i][j][1][2] = - dt * tx2
        * ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
      c[i][j][1][3] = - dt * tx2
        * ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
      c[i][j][1][4] = - dt * tx2 * C2;
      c[i][j][2][0] = - dt * tx2
        * ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
        - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
      c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
      c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
        - dt * tx1 * ( c34 * tmp1 )
        - dt * tx1 * dx3;
      c[i][j][2][3] = 0.0;
      c[i][j][2][4] = 0.0;
      c[i][j][3][0] = - dt * tx2
        * ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
        - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
      c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
      c[i][j][3][2] = 0.0;
      c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
        - dt * tx1 * ( c34 * tmp1 )
        - dt * tx1 * dx4;
      c[i][j][3][4] = 0.0;
      c[i][j][4][0] = - dt * tx2
        * ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
                   + u[i-1][j][k][2] * u[i-1][j][k][2]
                   + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
            - C1 * ( u[i-1][j][k][4] * tmp1 ) )
          * ( u[i-1][j][k][1] * tmp1 ) )
        - dt * tx1
        * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
            - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
            - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
            - c1345 * tmp2 * u[i-1][j][k][4] );
      c[i][j][4][1] = - dt * tx2
        * ( C1 * ( u[i-1][j][k][4] * tmp1 )
          - 0.50 * C2
          * ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
              + u[i-1][j][k][2]*u[i-1][j][k][2]
              + u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
        - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
      c[i][j][4][2] = - dt * tx2
        * ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
        - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
      c[i][j][4][3] = - dt * tx2
        * ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
        - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
      c[i][j][4][4] = - dt * tx2 * ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
        - dt * tx1 * c1345 * tmp1
        - dt * tx1 * dx5;
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c   compute the upper triangular part of the jacobian matrix
c   (definition continues past this chunk)
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j;
  double r43;
  double c1345;
  double c34;
  double tmp1, tmp2, tmp3;

  r43 = ( 4.0 / 3.0 );
  c1345 = C1 * C3 * C4 * C5;
  c34 = C3 * C4;

//#if defined(_OPENMP)
#pragma omp parallel for private(tmp1, tmp2, tmp3)
  for (i = iend; i >= ist; i--) {
#pragma omp parallel for private(tmp1, tmp2, tmp3)
    for (j = jend; j >= jst; j--) {
/*#else
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
#endif*/
/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;

      d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 );
      d[i][j][0][1] = 0.0;
      d[i][j][0][2] = 0.0;
      d[i][j][0][3] = 0.0;
      d[i][j][0][4] = 0.0;
      d[i][j][1][0] = dt * 2.0
        * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
          + ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
          + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
      d[i][j][1][1] =
1.0 + dt * 2.0 * ( tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 ); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] ) + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) ); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 ); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][3] ) + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) ); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 ); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) ); d[i][j][4][1] = dt * 2.0 * ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] ); d[i][j][4][2] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] ); d[i][j][4][3] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * 
u[i][j][k][3] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] ); d[i][j][4][4] = 1.0 + dt * 2.0 * ( tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1 ) + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 ); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i+1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = - dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 ) + C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] ); a[i][j][1][1] = dt * tx2 * ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( r43 * c34 * tmp1 ) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2] * tmp1 ) ); a[i][j][1][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3] * tmp1 ) ); a[i][j][1][4] = dt * tx2 * C2 ; a[i][j][2][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] ); a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 ); a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] ); a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 ); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 - 
C1 * ( u[i+1][j][k][4] * tmp1 ) ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) ) - c1345 * tmp2 * u[i+1][j][k][4] ); a[i][j][4][1] = dt * tx2 * ( C1 * ( u[i+1][j][k][4] * tmp1 ) - 0.50 * C2 * ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1] + u[i+1][j][k][2]*u[i+1][j][k][2] + u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) ) - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1]; a[i][j][4][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2]; a[i][j][4][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3]; a[i][j][4][4] = dt * tx2 * ( C1 * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j+1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = - dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] ); b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 ); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 ) + 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1] + u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] ); b[i][j][2][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1] * tmp1 ) ); b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 ) * ( 
u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( r43 * c34 * tmp1 ) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][3] * tmp1 ) ); b[i][j][2][4] = dt * ty2 * C2; b[i][j][3][0] = dt * ty2 * ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] ); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 ); b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1] + u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 - C1 * ( u[i][j+1][k][4] * tmp1 ) ) * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) ) - ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) ) - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) ) - c1345*tmp2*u[i][j+1][k][4] ); b[i][j][4][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1]; b[i][j][4][2] = dt * ty2 * ( C1 * ( u[i][j+1][k][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j+1][k][1]*u[i][j+1][k][1] + 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2] + u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2]; b[i][j][4][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3]; b[i][j][4][4] = dt * ty2 * ( C1 * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k+1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = - dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) 
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] ); c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2 ; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 ); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] ); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( c34 * tmp1 ) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 ); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 ) + 0.50 * C2 * ( ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) ) - dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] ); c[i][j][3][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1] * tmp1 ) ); c[i][j][3][2] = dt * tz2 * ( - C2 * ( u[i][j][k+1][2] * tmp1 ) ); c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 ) * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( r43 * c34 * tmp1 ) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * C2; c[i][j][4][0] = dt * tz2 * ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 - C1 * ( u[i][j][k+1][4] * tmp1 ) ) * ( u[i][j][k+1][3] * tmp1 ) ) - dt * tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) ) - ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) ) - c1345 * tmp2 * u[i][j][k+1][4] ); c[i][j][4][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1]; c[i][j][4][2] = dt * tz2 * ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2]; c[i][j][4][3] = dt * tz2 * ( C1 * ( u[i][j][k+1][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j][k+1][1]*u[i][j][k+1][1] + u[i][j][k+1][2]*u[i][j][k+1][2] + 
3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
	- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
      c[i][j][4][4] = dt * tz2
	* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
	- dt * tz1 * c1345 * tmp1
	- dt * tz1 * dz5;
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Compute the RMS L2-norm of the 5-component field v over the
c   interior points (i in [ist,iend], j in [jst,jend], k in [1,nz0-2]);
c   result per component in sum[0..4].
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
		    int ist, int iend,
		    int jst, int jend,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/
		    double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
		    double sum[5]) {

{

/*--------------------------------------------------------------------
c   to compute the l2-norm of vector v.
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  /* scalar accumulators, one per solution component */
  double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;

  /* zero the output before accumulating */
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    sum[m] = 0.0;
  }

  /* NOTE(review): the inner `parallel for` directives create nested
     parallel regions; unless nesting is enabled they run serially
     inside the outer team.  The reductions keep the sums correct
     either way — confirm intent before restructuring. */
#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)
  for (i = ist; i <= iend; i++) {
#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)
    for (j = jst; j <= jend; j++) {
#pragma omp parallel for reduction(+:sum4) reduction(+:sum3) reduction(+:sum2) reduction(+:sum1) reduction(+:sum0)
      for (k = 1; k <= nz0-2; k++) {
	sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
	sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
	sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
	sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
	sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
      }
    }
  }

  /* fold the (already fully reduced) scalars into the output vector;
     this block runs once, sequentially */
  {
    sum[0] += sum0;
    sum[1] += sum1;
    sum[2] += sum2;
    sum[3] += sum3;
    sum[4] += sum4;
  }

  /* normalize by the interior point count and take the square root */
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Surface-integral diagnostic: integrates pressure-like quantities
c   over three faces of the sub-domain into the global `frc`.
--------------------------------------------------------------------*/
static void pintgr(void) {

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int ibeg, ifin, ifin1;
  int jbeg, jfin, jfin1;
  int iglob, iglob1, iglob2;
  int jglob, jglob1, jglob2;
  double phi1[ISIZ2+2][ISIZ3+2];	/* phi1(0:isiz2+1,0:isiz3+1) */
  double phi2[ISIZ2+2][ISIZ3+2];	/* phi2(0:isiz2+1,0:isiz3+1) */
  double frc1, frc2, frc3;

/*--------------------------------------------------------------------
c   set up the sub-domains for integeration in each processor
--------------------------------------------------------------------*/
  /* single-process layout: iglob1/jglob1 etc. mimic the MPI
     decomposition of the original Fortran code */
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx-1;
  if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
  if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny-1;
  if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
  if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin -1;
  if (jfin1 == ji2) jfin1 = jfin -1;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
  for (i = 0; i <= ISIZ2+1; i++) {
#pragma omp parallel for
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* evaluate phi on the bottom (k=ki1) and top (k=ki2) faces.
     NOTE(review): jglob is written by multiple threads without being
     in the private list (benign here — its value is never read). */
#pragma omp parallel for private(iglob, k)
  for (i = ibeg; i <= ifin; i++) {
    iglob = i;
#pragma omp parallel for private(iglob, k)
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      k = ki1;
      /* phi = C2 * (rhoE - 0.5*rho*|vel|^2) — a pressure-like quantity */
      phi1[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
      k = ki2;
      phi2[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
    }
  }

  /* trapezoidal integration of phi1+phi2 over the k-faces */
  frc1 = 0.0;
#pragma omp parallel for reduction(+:frc1)
  for (i = ibeg; i <= ifin1; i++) {
#pragma omp parallel for reduction(+:frc1)
    for (j = jbeg; j <= jfin1; j++) {
      frc1 = frc1 + (  phi1[i][j]
		       + phi1[i+1][j]
		       + phi1[i][j+1]
		       + phi1[i+1][j+1]
		       + phi2[i][j]
		       + phi2[i+1][j]
		       + phi2[i][j+1]
		       + phi2[i+1][j+1] );
    }
  }
  frc1 = dxi * deta * frc1;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
  for (i = 0; i <= ISIZ2+1; i++) {
#pragma omp parallel for
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* evaluate phi on the j=jbeg and j=jfin faces (only if they are
     the true domain boundaries ji1/ji2) */
  jglob = jbeg;
  if (jglob == ji1) {
#pragma omp parallel for private(iglob)
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
#pragma omp parallel for
      for (k = ki1; k <= ki2; k++) {
	phi1[i][k] = C2*(  u[i][jbeg][k][4]
			   - 0.50 * (  pow2(u[i][jbeg][k][1])
				       + pow2(u[i][jbeg][k][2])
				       + pow2(u[i][jbeg][k][3]) )
			   / u[i][jbeg][k][0] );
      }
    }
  }

  jglob = jfin;
  if (jglob == ji2) {
#pragma omp parallel for private(iglob)
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
#pragma omp parallel for
      for (k = ki1; k <= ki2; k++) {
	phi2[i][k] = C2*(  u[i][jfin][k][4]
			   - 0.50 * (  pow2(u[i][jfin][k][1])
				       + pow2(u[i][jfin][k][2])
				       + pow2(u[i][jfin][k][3]) )
			   / u[i][jfin][k][0] );
      }
    }
  }

  /* trapezoidal integration over the j-faces */
  frc2 = 0.0;
#pragma omp parallel for reduction(+:frc2)
  for (i = ibeg; i <= ifin1; i++) {
#pragma omp parallel for reduction(+:frc2)
    for (k = ki1; k <= ki2-1; k++) {
      frc2 = frc2 + (  phi1[i][k]
		       + phi1[i+1][k]
		       + phi1[i][k+1]
		       + phi1[i+1][k+1]
		       + phi2[i][k]
		       + phi2[i+1][k]
		       + phi2[i][k+1]
		       + phi2[i+1][k+1] );
    }
  }
  frc2 = dxi * dzeta * frc2;

/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for
  for (i = 0; i <= ISIZ2+1; i++) {
    /* NOTE(review): `firstprivate(i )` is inconsistent with the other
       two init loops (which use no clause); harmless but looks like a
       leftover — confirm before cleaning up. */
#pragma omp parallel for firstprivate(i )
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  /* evaluate phi on the i=ibeg and i=ifin faces (sequential; note the
     phi arrays are indexed [j][k] here) */
  iglob = ibeg;
  if (iglob == ii1) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi1[j][k] = C2*(  u[ibeg][j][k][4]
			   - 0.50 * (  pow2(u[ibeg][j][k][1])
				       + pow2(u[ibeg][j][k][2])
				       + pow2(u[ibeg][j][k][3]) )
			   / u[ibeg][j][k][0] );
      }
    }
  }

  iglob = ifin;
  if (iglob == ii2) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi2[j][k] = C2*(  u[ifin][j][k][4]
			   - 0.50 * (  pow2(u[ifin][j][k][1])
				       + pow2(u[ifin][j][k][2])
				       + pow2(u[ifin][j][k][3]) )
			   / u[ifin][j][k][0] );
      }
    }
  }

  /* trapezoidal integration over the i-faces */
  frc3 = 0.0;
  for (j = jbeg; j <= jfin1; j++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc3 = frc3 + (  phi1[j][k]
		       + phi1[j+1][k]
		       + phi1[j][k+1]
		       + phi1[j+1][k+1]
		       + phi2[j][k]
		       + phi2[j+1][k]
		       + phi2[j][k+1]
		       + phi2[j+1][k+1] );
    }
  }
  frc3 = deta * dzeta * frc3;

  /* combined surface integral (global result) */
  frc = 0.25 * ( frc1 + frc2 + frc3 );
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void read_input(void) {

  FILE *fp;

/*--------------------------------------------------------------------
c    if input file does not exist, it uses defaults
c       ipr = 1 for detailed progress output
c       inorm = how often the norm is printed (once every inorm iterations)
c       itmax = number of pseudo time steps
c       dt = time step
c       omega 1 over-relaxation factor for SSOR
c       tolrsd = steady state residual tolerance levels
c       nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/

  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - LU Benchmark\n\n");

  fp = fopen("inputlu.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputlu.data\n");

    /* each while(fgetc...) skips to end of a header/comment line */
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp,
"%d%d", &ipr, &inorm);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d", &itmax);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &dt);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &omega);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf%lf%lf%lf%lf",
	   &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
    while(fgetc(fp) != '\n');
    fclose(fp);
  } else {
    /* compiled-in defaults when inputlu.data is absent */
    ipr = IPR_DEFAULT;
    inorm = INORM_DEFAULT;
    itmax = ITMAX_DEFAULT;
    dt = DT_DEFAULT;
    omega = OMEGA_DEFAULT;
    tolrsd[0] = TOLRSD1_DEF;
    tolrsd[1] = TOLRSD2_DEF;
    tolrsd[2] = TOLRSD3_DEF;
    tolrsd[3] = TOLRSD4_DEF;
    tolrsd[4] = TOLRSD5_DEF;
    nx0 = ISIZ1;
    ny0 = ISIZ2;
    nz0 = ISIZ3;
  }

/*--------------------------------------------------------------------
c   check problem size
--------------------------------------------------------------------*/
  /* NOTE(review): the guard rejects sizes < 4 but the message says
     "AT LEAST EQUAL TO 5" — mismatch inherited from the reference
     code; confirm the intended minimum before changing either. */
  if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
    printf("     PROBLEM SIZE IS TOO SMALL - \n"
	   "     SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
    exit(1);
  }

  if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
    printf("     PROBLEM SIZE IS TOO LARGE - \n"
	   "     NX, NY AND NZ SHOULD BE EQUAL TO \n"
	   "     ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
    exit(1);
  }

  printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
  printf(" Iterations: %3d\n", itmax);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Build the steady-state residual rsd = -frct + flux differences in
c   the xi, eta and zeta directions, plus fourth-order dissipation.
--------------------------------------------------------------------*/
static void rhs(void) {

{

/*--------------------------------------------------------------------
c   compute the right hand sides
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/ int i, j, k, m; int L1, L2; int ist1, iend1; int jst1, jend1; double q; double u21, u31, u41; double tmp; double u21i, u31i, u41i, u51i; double u21j, u31j, u41j, u51j; double u21k, u31k, u41k, u51k; double u21im1, u31im1, u41im1, u51im1; double u21jm1, u31jm1, u41jm1, u51jm1; double u21km1, u31km1, u41km1, u51km1; #pragma omp parallel for for (i = 0; i <= nx-1; i++) { #pragma omp parallel for for (j = 0; j <= ny-1; j++) { #pragma omp parallel for for (k = 0; k <= nz-1; k++) { #pragma omp parallel for for (m = 0; m < 5; m++) { rsd[i][j][k][m] = - frct[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx-1; #pragma omp parallel for for (i = L1; i <= L2; i++) { #pragma omp parallel for for (j = jst; j <= jend; j++) { #pragma omp parallel for for (k = 1; k <= nz - 2; k++) { flux[i][j][k][0] = u[i][j][k][1]; u21 = u[i][j][k][1] / u[i][j][k][0]; q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * ( u[i][j][k][4] - q ); flux[i][j][k][2] = u[i][j][k][2] * u21; flux[i][j][k][3] = u[i][j][k][3] * u21; flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21; } } } #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1) for (j = jst; j <= jend; j++) { #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] ); } } L2 = nx-1; #pragma omp parallel for private(tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, 
u51im1) for (i = ist; i <= L2; i++) { tmp = 1.0 / u[i][j][k][0]; u21i = tmp * u[i][j][k][1]; u31i = tmp * u[i][j][k][2]; u41i = tmp * u[i][j][k][3]; u51i = tmp * u[i][j][k][4]; tmp = 1.0 / u[i-1][j][k][0]; u21im1 = tmp * u[i-1][j][k][1]; u31im1 = tmp * u[i-1][j][k][2]; u41im1 = tmp * u[i-1][j][k][3]; u51im1 = tmp * u[i-1][j][k][4]; flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1); flux[i][j][k][2] = tx3 * ( u31i - u31im1 ); flux[i][j][k][3] = tx3 * ( u41i - u41im1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) ) - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) ) + (1.0/6.0) * tx3 * ( pow2(u21i) - pow2(u21im1) ) + C1 * C5 * tx3 * ( u51i - u51im1 ); } #pragma omp parallel for for (i = ist; i <= iend; i++) { rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * ( u[i-1][j][k][0] - 2.0 * u[i][j][k][0] + u[i+1][j][k][0] ); rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] ) + dx2 * tx1 * ( u[i-1][j][k][1] - 2.0 * u[i][j][k][1] + u[i+1][j][k][1] ); rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] ) + dx3 * tx1 * ( u[i-1][j][k][2] - 2.0 * u[i][j][k][2] + u[i+1][j][k][2] ); rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] ) + dx4 * tx1 * ( u[i-1][j][k][3] - 2.0 * u[i][j][k][3] + u[i+1][j][k][3] ); rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] ) + dx5 * tx1 * ( u[i-1][j][k][4] - 2.0 * u[i][j][k][4] + u[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * ( + 5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m] ); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * ( - 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m] ); } ist1 = 3; 
iend1 = nx - 4; for (i = ist1; i <= iend1; i++) { for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0 * u[i-1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i+1][j][k][m] + u[i+2][j][k][m] ); } } for (m = 0; m < 5; m++) { rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m] - dssp * ( u[nx-5][j][k][m] - 4.0 * u[nx-4][j][k][m] + 6.0 * u[nx-3][j][k][m] - 4.0 * u[nx-2][j][k][m] ); rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m] - dssp * ( u[nx-4][j][k][m] - 4.0 * u[nx-3][j][k][m] + 5.0 * u[nx-2][j][k][m] ); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny-1; #pragma omp parallel for private(u31, q) for (i = ist; i <= iend; i++) { #pragma omp parallel for private(u31, q) for (j = L1; j <= L2; j++) { #pragma omp parallel for private(u31, q) for (k = 1; k <= nz - 2; k++) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31; } } } #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) for (i = ist; i <= iend; i++) { #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1, jst1, jend1) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for for (j = jst; j <= jend; j++) { #pragma omp parallel for for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] ); } } L2 = ny-1; #pragma omp parallel for private(tmp, u21j, u31j, u41j, u51j, u21jm1, u31jm1, u41jm1, u51jm1) for (j = jst; j <= L2; j++) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * 
u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j-1][k][0]; u21jm1 = tmp * u[i][j-1][k][1]; u31jm1 = tmp * u[i][j-1][k][2]; u41jm1 = tmp * u[i][j-1][k][3]; u51jm1 = tmp * u[i][j-1][k][4]; flux[i][j][k][1] = ty3 * ( u21j - u21jm1 ); flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1); flux[i][j][k][3] = ty3 * ( u41j - u41jm1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) ) - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) ) + (1.0/6.0) * ty3 * ( pow2(u31j) - pow2(u31jm1) ) + C1 * C5 * ty3 * ( u51j - u51jm1 ); } #pragma omp parallel for for (j = jst; j <= jend; j++) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * ( u[i][j-1][k][0] - 2.0 * u[i][j][k][0] + u[i][j+1][k][0] ); rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] ) + dy2 * ty1 * ( u[i][j-1][k][1] - 2.0 * u[i][j][k][1] + u[i][j+1][k][1] ); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] ) + dy3 * ty1 * ( u[i][j-1][k][2] - 2.0 * u[i][j][k][2] + u[i][j+1][k][2] ); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] ) + dy4 * ty1 * ( u[i][j-1][k][3] - 2.0 * u[i][j][k][3] + u[i][j+1][k][3] ); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] ) + dy5 * ty1 * ( u[i][j-1][k][4] - 2.0 * u[i][j][k][4] + u[i][j+1][k][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * ( + 5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m] ); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * ( - 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m] ); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j++) { for (m = 0; m < 5; m++) { 
rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0 * u[i][j-1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j+1][k][m] + u[i][j+2][k][m] ); } } for (m = 0; m < 5; m++) { rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m] - dssp * ( u[i][ny-5][k][m] - 4.0 * u[i][ny-4][k][m] + 6.0 * u[i][ny-3][k][m] - 4.0 * u[i][ny-2][k][m] ); rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m] - dssp * ( u[i][ny-4][k][m] - 4.0 * u[i][ny-3][k][m] + 5.0 * u[i][ny-2][k][m] ); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1, u41, q) for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #pragma omp parallel for private(u41, q) for (k = 0; k <= nz-1; k++) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q); flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41; } #pragma omp parallel for for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] ); } } #pragma omp parallel for private(tmp, u21k, u31k, u41k, u51k, u21km1, u31km1, u41km1, u51km1) for (k = 1; k <= nz-1; k++) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k-1][0]; u21km1 = tmp * u[i][j][k-1][1]; u31km1 = tmp * u[i][j][k-1][2]; u41km1 = tmp * u[i][j][k-1][3]; u51km1 = tmp * u[i][j][k-1][4]; flux[i][j][k][1] = tz3 * ( u21k - u21km1 ); flux[i][j][k][2] = tz3 * ( u31k - u31km1 ); flux[i][j][k][3] = 
(4.0/3.0) * tz3 * (u41k-u41km1); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) ) - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) ) + (1.0/6.0) * tz3 * ( pow2(u41k) - pow2(u41km1) ) + C1 * C5 * tz3 * ( u51k - u51km1 ); } #pragma omp parallel for for (k = 1; k <= nz - 2; k++) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * ( u[i][j][k-1][0] - 2.0 * u[i][j][k][0] + u[i][j][k+1][0] ); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] ) + dz2 * tz1 * ( u[i][j][k-1][1] - 2.0 * u[i][j][k][1] + u[i][j][k+1][1] ); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] ) + dz3 * tz1 * ( u[i][j][k-1][2] - 2.0 * u[i][j][k][2] + u[i][j][k+1][2] ); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] ) + dz4 * tz1 * ( u[i][j][k-1][3] - 2.0 * u[i][j][k][3] + u[i][j][k+1][3] ); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] ) + dz5 * tz1 * ( u[i][j][k-1][4] - 2.0 * u[i][j][k][4] + u[i][j][k+1][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * ( + 5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m] ); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * ( - 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m] ); } #pragma omp parallel for for (k = 3; k <= nz - 4; k++) { #pragma omp parallel for for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0 * u[i][j][k-1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k+1][m] + u[i][j][k+2][m] ); } } for (m = 0; m < 5; m++) { rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m] - dssp * ( u[i][j][nz-5][m] - 4.0 * u[i][j][nz-4][m] + 6.0 * u[i][j][nz-3][m] - 4.0 * u[i][j][nz-2][m] ); rsd[i][j][nz-2][m] 
= rsd[i][j][nz-2][m] - dssp * ( u[i][j][nz-4][m] - 4.0 * u[i][j][nz-3][m] + 5.0 * u[i][j][nz-2][m] ); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setbv(void) { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k; int iglob, jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ #pragma omp parallel for private(iglob) for (i = 0; i < nx; i++) { iglob = i; for (j = 0; j < ny; j++) { jglob = j; exact( iglob, jglob, 0, &u[i][j][0][0] ); exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] ); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ #pragma omp parallel for private(iglob) for (i = 0; i < nx; i++) { iglob = i; for (k = 0; k < nz; k++) { exact( iglob, 0, k, &u[i][0][k][0] ); } } #pragma omp parallel for private(iglob) for (i = 0; i < nx; i++) { iglob = i; for (k = 0; k < nz; k++) { exact( iglob, ny0-1, k, &u[i][ny-1][k][0] ); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ #pragma omp parallel for private(jglob) for (j = 0; j < ny; j++) { jglob = j; for (k = 0; k < nz; k++) { exact( 0, jglob, k, &u[0][j][k][0] ); } } #pragma omp parallel for private(jglob) for (j = 0; j < 
ny; j++) {
    jglob = j;
    for (k = 0; k < nz; k++) {
      /* east face (i = nx-1) */
      exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Fill the global mesh spacings, difference factors, sub-domain
c   bounds, dissipation constant and the exact-solution coefficient
c   table `ce`.
--------------------------------------------------------------------*/
static void setcoeff(void) {

/*--------------------------------------------------------------------
c   set up coefficients
--------------------------------------------------------------------*/
  /* mesh spacings over the unit cube */
  dxi = 1.0 / ( nx0 - 1 );
  deta = 1.0 / ( ny0 - 1 );
  dzeta = 1.0 / ( nz0 - 1 );

  /* second-difference, first-difference and viscous factors */
  tx1 = 1.0 / ( dxi * dxi );
  tx2 = 1.0 / ( 2.0 * dxi );
  tx3 = 1.0 / dxi;

  ty1 = 1.0 / ( deta * deta );
  ty2 = 1.0 / ( 2.0 * deta );
  ty3 = 1.0 / deta;

  tz1 = 1.0 / ( dzeta * dzeta );
  tz2 = 1.0 / ( 2.0 * dzeta );
  tz3 = 1.0 / dzeta;

  /* interior index bounds used by pintgr */
  ii1 = 1;
  ii2 = nx0 - 2;
  ji1 = 1;
  ji2 = ny0 - 3;
  ki1 = 2;
  ki2 = nz0 - 2;

/*--------------------------------------------------------------------
c   diffusion coefficients
--------------------------------------------------------------------*/
  dx1 = 0.75;
  dx2 = dx1;
  dx3 = dx1;
  dx4 = dx1;
  dx5 = dx1;

  dy1 = 0.75;
  dy2 = dy1;
  dy3 = dy1;
  dy4 = dy1;
  dy5 = dy1;

  dz1 = 1.00;
  dz2 = dz1;
  dz3 = dz1;
  dz4 = dz1;
  dz5 = dz1;

/*--------------------------------------------------------------------
c   fourth difference dissipation
--------------------------------------------------------------------*/
  dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the first pde
--------------------------------------------------------------------*/
  ce[0][0] = 2.0;
  ce[0][1] = 0.0;
  ce[0][2] = 0.0;
  ce[0][3] = 4.0;
  ce[0][4] = 5.0;
  ce[0][5] = 3.0;
  ce[0][6] = 5.0e-01;
  ce[0][7] = 2.0e-02;
  ce[0][8] = 1.0e-02;
  ce[0][9] = 3.0e-02;
  ce[0][10] = 5.0e-01;
  ce[0][11] = 4.0e-01;
  ce[0][12] = 3.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the second pde
--------------------------------------------------------------------*/
  ce[1][0] = 1.0;
  ce[1][1] = 0.0;
  ce[1][2] = 0.0;
  ce[1][3] = 0.0;
  ce[1][4] = 1.0;
  ce[1][5] = 2.0;
  ce[1][6] = 3.0;
  ce[1][7] = 1.0e-02;
  ce[1][8] = 3.0e-02;
  ce[1][9] = 2.0e-02;
  ce[1][10] = 4.0e-01;
  ce[1][11] = 3.0e-01;
  ce[1][12] = 5.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
  ce[2][0] = 2.0;
  ce[2][1] = 2.0;
  ce[2][2] = 0.0;
  ce[2][3] = 0.0;
  ce[2][4] = 0.0;
  ce[2][5] = 2.0;
  ce[2][6] = 3.0;
  ce[2][7] = 4.0e-02;
  ce[2][8] = 3.0e-02;
  ce[2][9] = 5.0e-02;
  ce[2][10] = 3.0e-01;
  ce[2][11] = 5.0e-01;
  ce[2][12] = 4.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
  ce[3][0] = 2.0;
  ce[3][1] = 2.0;
  ce[3][2] = 0.0;
  ce[3][3] = 0.0;
  ce[3][4] = 0.0;
  ce[3][5] = 2.0;
  ce[3][6] = 3.0;
  ce[3][7] = 3.0e-02;
  ce[3][8] = 5.0e-02;
  ce[3][9] = 4.0e-02;
  ce[3][10] = 2.0e-01;
  ce[3][11] = 1.0e-01;
  ce[3][12] = 3.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
  ce[4][0] = 5.0;
  ce[4][1] = 4.0;
  ce[4][2] = 3.0;
  ce[4][3] = 2.0;
  ce[4][4] = 1.0e-01;
  ce[4][5] = 4.0e-01;
  ce[4][6] = 3.0e-01;
  ce[4][7] = 5.0e-02;
  ce[4][8] = 4.0e-02;
  ce[4][9] = 3.0e-02;
  ce[4][10] = 1.0e-01;
  ce[4][11] = 3.0e-01;
  ce[4][12] = 2.0e-01;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setiv(void) {

{
/*--------------------------------------------------------------------
c
c   set the initial values of independent variables based on tri-linear
c   interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int iglob, jglob;
  double xi, eta, zeta;
  double pxi, peta, pzeta;
  /* exact boundary values on the six faces surrounding point (i,j,k) */
  double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],
    ue_iny0k[5],ue_ij1[5],ue_ijnz[5];

#pragma omp parallel for private(jglob, zeta, eta, iglob, xi, pxi, peta, pzeta)
  for (j = 0; j < ny; j++) {
    jglob = j;
    for (k = 1; k < nz - 1; k++) {
      /* NOTE(review): zeta uses local nz while xi/eta use the global
	 nx0/ny0 — consistent only because single-process nz == nz0;
	 confirm if the decomposition ever changes. */
      zeta = ((double)k) / (nz-1);
      if (jglob != 0 && jglob != ny0-1) {
	eta = ( (double) (jglob) ) / (ny0-1);
	for (i = 0; i < nx; i++) {
	  iglob = i;
	  if(iglob != 0 && iglob != nx0-1) {
	    xi = ( (double) (iglob) ) / (nx0-1);
	    exact (0,jglob,k,ue_1jk);
	    exact (nx0-1,jglob,k,ue_nx0jk);
	    exact (iglob,0,k,ue_i1k);
	    exact (iglob,ny0-1,k,ue_iny0k);
	    exact (iglob,jglob,0,ue_ij1);
	    exact (iglob,jglob,nz-1,ue_ijnz);
	    /* tri-linear blend of the six face values */
#pragma omp parallel for private(pxi, peta, pzeta)
	    for (m = 0; m < 5; m++) {
	      pxi =   ( 1.0 - xi ) * ue_1jk[m]
		+ xi   * ue_nx0jk[m];
	      peta =  ( 1.0 - eta ) * ue_i1k[m]
		+ eta   * ue_iny0k[m];
	      pzeta = ( 1.0 - zeta ) * ue_ij1[m]
		+ zeta   * ue_ijnz[m];

	      u[i][j][k][m] = pxi + peta + pzeta
		- pxi * peta - peta * pzeta - pzeta * pxi
		+ pxi * peta * pzeta;
	    }
	  }
	}
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void ssor(void) {

/*--------------------------------------------------------------------
c   to perform pseudo-time stepping SSOR iterations
c   for five nonlinear PDEs
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int istep;
  double tmp;
  double delunm[5], tv[ISIZ1][ISIZ2][5];

/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
  /* SSOR scaling factor applied to each correction below */
  tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;

/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
  {
/* NOTE(review): the inner "parallel for" pragmas in this routine are
   nested inside an already-parallel loop; without nested parallelism
   enabled they are effectively inert — confirm whether plain "omp for"
   (worksharing) or collapse was intended. */
#pragma omp parallel for
    for (i = 0; i < ISIZ1; i++) {
#pragma omp parallel for
      for (j = 0; j < ISIZ2; j++) {
#pragma omp parallel for
        for (k = 0; k < 5; k++) {
#pragma omp parallel for
          for (m = 0; m < 5; m++) {
            a[i][j][k][m] = 0.0;
            b[i][j][k][m] = 0.0;
            c[i][j][k][m] = 0.0;
            d[i][j][k][m] = 0.0;
          }
        }
      }
    }
  }

/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
  rhs();

/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
  l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm );

  timer_clear(1);
  timer_start(1);

/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
  for (istep = 1; istep <= itmax; istep++) {
    /* progress report on the first, last, and every 20th step */
    if (istep%20 == 0 || istep == itmax || istep == 1) {
      printf(" Time step %4d\n", istep);
    }

    {
/*--------------------------------------------------------------------
c perform SSOR iteration
--------------------------------------------------------------------*/
      /* scale the residual by the pseudo-timestep */
#pragma omp parallel for
      for (i = ist; i <= iend; i++) {
#pragma omp parallel for
        for (j = jst; j <= jend; j++) {
#pragma omp parallel for
          for (k = 1; k <= nz - 2; k++) {
#pragma omp parallel for
            for (m = 0; m < 5; m++) {
              rsd[i][j][k][m] = dt * rsd[i][j][k][m];
            }
          }
        }
      }

      /* forward sweep in k: lower-triangular factor and solve */
      for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
        jacld(k);

/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
        blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0 );
      }

      /* backward sweep in k: upper-triangular factor and solve */
      for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
        jacu(k);

/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
        buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0 );
      }

/*--------------------------------------------------------------------
c update the variables
--------------------------------------------------------------------*/
#pragma omp parallel for
      for (i = ist; i <= iend; i++) {
#pragma omp parallel for
        for (j = jst; j <= jend; j++) {
#pragma omp parallel for
          for (k = 1; k <= nz-2; k++) {
#pragma omp parallel for
            for (m = 0; m < 5; m++) {
              u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
            }
          }
        }
      }
    } /* end parallel */

/*--------------------------------------------------------------------
c compute the max-norms of newton iteration corrections
--------------------------------------------------------------------*/
    if ( istep % inorm == 0 ) {
      l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, delunm );
    }

/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
    rhs();

/*--------------------------------------------------------------------
c compute the max-norms of newton iteration residuals
--------------------------------------------------------------------*/
    if ( ( istep % inorm == 0 ) || ( istep == itmax ) ) {
      l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm );
    }

/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
    if ( ( rsdnm[0] < tolrsd[0] ) && ( rsdnm[1] < tolrsd[1] ) && ( rsdnm[2] < tolrsd[2] ) && ( rsdnm[3] < tolrsd[3] ) && ( rsdnm[4] < tolrsd[4] ) ) {
      /* NOTE(review): all residuals are below tolerance (converged), yet
         this exits the whole process with status 1 — a nonzero (failure)
         exit code and no timer_stop.  Confirm this is intended. */
      exit(1);
    }
  }

  timer_stop(1);
  maxtime= timer_read(1);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c verify: compare the computed RMS residual norms (xcr), solution-error
c norms (xce), and surface integral (xci) against stored reference
c values for the known problem classes (S/W/A/B/C, selected by the
c global grid size and itmax).  On return *class is the matched class
c (or 'U' for unknown) and *verified reports pass/fail.
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci, char *class, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
  double xcrref[5],xceref[5],xciref,
    xcrdif[5],xcedif[5],xcidif,
    epsilon, dtref;
  int m;

/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
  epsilon = 1.0e-08;

  *class = 'U';
  *verified = TRUE;
  /* default reference values of 1.0 keep the relative-difference
     computation below well-defined for an unknown class */
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
  xciref = 1.0;

  if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
    *class = 'S';
    dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xcrref[0] = 1.6196343210976702e-02;
    xcrref[1] = 2.1976745164821318e-03;
    xcrref[2] = 1.5179927653399185e-03;
    xcrref[3] = 1.5029584435994323e-03;
    xcrref[4] = 3.4264073155896461e-02;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xceref[0] = 6.4223319957960924e-04;
    xceref[1] = 8.4144342047347926e-05;
    xceref[2] = 5.8588269616485186e-05;
    xceref[3] = 5.8474222595157350e-05;
    xceref[4] = 1.3103347914111294e-03;

/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xciref = 7.8418928865937083;
  } else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
    *class = 'W';   /* SPEC95fp size */
    dtref = 1.5e-3;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
    xcrref[0] = 0.1236511638192e+02;
    xcrref[1] = 0.1317228477799e+01;
    xcrref[2] = 0.2550120713095e+01;
    xcrref[3] = 0.2326187750252e+01;
    xcrref[4] = 0.2826799444189e+02;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
    xceref[0] = 0.4867877144216;
    xceref[1] = 0.5064652880982e-01;
    xceref[2] = 0.9281818101960e-01;
    xceref[3] = 0.8570126542733e-01;
    xceref[4] = 0.1084277417792e+01;

/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
    xciref = 0.1161399311023e+02;
  } else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
    *class = 'A';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 7.7902107606689367e+02;
    xcrref[1] = 6.3402765259692870e+01;
    xcrref[2] = 1.9499249727292479e+02;
    xcrref[3] = 1.7845301160418537e+02;
    xcrref[4] = 1.8384760349464247e+03;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.9964085685471943e+01;
    xceref[1] = 2.8194576365003349;
    xceref[2] = 7.3473412698774742;
    xceref[3] = 6.7139225687777051;
    xceref[4] = 7.0715315688392578e+01;

/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 2.6030925604886277e+01;
  } else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
    *class = 'B';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 3.5532672969982736e+03;
    xcrref[1] = 2.6214750795310692e+02;
    xcrref[2] = 8.8333721850952190e+02;
    xcrref[3] = 7.7812774739425265e+02;
    xcrref[4] = 7.3087969592545314e+03;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 1.1401176380212709e+02;
    xceref[1] = 8.1098963655421574;
    xceref[2] = 2.8480597317698308e+01;
    xceref[3] = 2.5905394567832939e+01;
    xceref[4] = 2.6054907504857413e+02;

/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 4.7887162703308227e+01;
  } else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
    *class = 'C';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 1.03766980323537846e+04;
    xcrref[1] = 8.92212458801008552e+02;
    xcrref[2] = 2.56238814582660871e+03;
    xcrref[3] = 2.19194343857831427e+03;
    xcrref[4] = 1.78078057261061185e+04;

/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.15986399716949279e+02;
    xceref[1] = 1.55789559239863600e+01;
    xceref[2] = 5.41318863077207766e+01;
    xceref[3] = 4.82262643154045421e+01;
    xceref[4] = 4.55902910043250358e+02;

/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 6.66404553572181300e+01;
  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }
  xcidif = fabs((xci - xciref)/xciref);

/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
  /* dtref is only read on this path, where a class matched and set it */
  if (*class != 'U') {
    printf("\n Verification being performed for class %1c\n", *class);
    printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d %20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d %20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of surface integral\n");
  } else {
    printf(" Surface integral\n");
  }
  if (*class == 'U') {
    printf(" %20.13e\n", xci);
  } else if (xcidif > epsilon) {
    *verified = FALSE;
    printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
  } else {
    printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
  }

  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
GB_binop__pow_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This translation unit instantiates every kernel for the POW binary operator
// on uint32_t; the actual loop bodies live in the GB_*_template.c files
// included below, specialized through the macros defined here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__pow_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__pow_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_uint32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_uint32)
// C=scalar+B                       GB (_bind1st__pow_uint32)
// C=scalar+B'                      GB (_bind1st_tran__pow_uint32)
// C=A+scalar                       GB (_bind2nd__pow_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__pow_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_pow_uint32 (aij, bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_pow_uint32 (x, y) ;

// true if the binop must be flipped (POW is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT32 || GxB_NO_POW_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// POW is none of these, so this kernel is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first
    // (generated-code artifact; harmless).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// NOTE(review): "(node)" below looks like a generator typo for "(none)";
// the stub is compiled out (#if 0) either way, so it has no effect.
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = GB_pow_uint32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = GB_pow_uint32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{  \
    uint32_t aij = Ax [pA] ;  \
    Cx [pC] = GB_pow_uint32 (x, aij) ;  \
}

GrB_Info GB (_bind1st_tran__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{  \
    uint32_t aij = Ax [pA] ;  \
    Cx [pC] = GB_pow_uint32 (aij, y) ;  \
}

GrB_Info GB (_bind2nd_tran__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fibonacci.c
#include "timing.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Serial cutoff for the task-parallel recursion: below this depth the
 * cost of creating an OpenMP task dwarfs the work it performs, so we
 * recurse serially instead.  Results are identical either way. */
#define FIB_TASK_CUTOFF 20

/*
 * Compute the n-th Fibonacci number (fib(0)=0, fib(1)=1) recursively,
 * spawning OpenMP tasks for the two subproblems while n is large.
 *
 * n : index of the Fibonacci number to compute (n >= 0).
 * Returns fib(n) as a long int.  Must be called from within a parallel
 * region (see main) for the tasks to run concurrently.
 */
long int fibonacci(int n)
{
  long int x, y;

  if (n < 2) {
    return n;
  }

  /* Task-granularity cutoff: small subtrees are computed serially to
   * avoid per-task overhead; this does not change the result. */
  if (n < FIB_TASK_CUTOFF) {
    return fibonacci(n - 1) + fibonacci(n - 2);
  }

  /* x and y are written by child tasks and read after the taskwait,
   * so they must be shared, not firstprivate. */
  #pragma omp task shared(x)
  x = fibonacci(n - 1);

  #pragma omp task shared(y)
  y = fibonacci(n - 2);

  #pragma omp taskwait
  return (x + y);
}

int main()
{
  int n = 42;
  double t1, t2;
  long int fib = 0;

  t1 = second();

  /* One thread enters the recursion; the tasks it spawns are picked up
   * by the rest of the team. */
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      fib = fibonacci(n);
    }
  }

  t2 = second();

  printf("fib(%d) = %ld (in %g [s])\n", n, fib, (t2 - t1));
  return 0;
}
ColorChartDetectionUtils.h
#ifndef CAPTURE3_COLOR_CHART_UTILS_H
#define CAPTURE3_COLOR_CHART_UTILS_H

#include <cmath>
#include <vector>
#include <omp.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include "../engine/objects/colorChart/ColorChart.h"
#include "../engine/objects/colorChart/ColorChartPatch.h"
#include "../constants/ColorChartConstants.h"
#include "../constants/ResourceConstants.h"
#include "../engine/objects/image/Image.h"
#include "ColorUtils.h"

namespace Capture3
{

	/**
	 * Locate a physical color chart in a captured image and fill a new
	 * ColorChart with the mean RGB value of each detected patch.
	 *
	 * Pipeline: auto-contrast -> greyscale -> Canny edges -> contour
	 * filtering (4-sided convex polygons of plausible area/ratio) ->
	 * corner estimation -> perspective rectification -> per-patch mean.
	 *
	 * @param image  Captured image to search (read-only).
	 * @return Newly allocated ColorChart; patches are only updated when a
	 *         chart was actually found, otherwise the chart is returned
	 *         with its default patch values.
	 *         NOTE(review): caller takes ownership of the raw `new` —
	 *         consider a smart pointer at the call sites.
	 */
	static ColorChart *detectColorChart(const Image *image)
	{
		// Create profile
		auto *chart = new ColorChart();
		const std::vector<ColorChartPatch *> &patches = chart->getPatches();

		/**************************************************
		 * Then we iterate over the captured image and
		 * find the min and max values for each channel,
		 * these values are needed to apply auto-contrast.
		 **************************************************/

		// Fetch image size
		const double *imageData = image->getRGB().getData();
		const cv::Mat &imageMat = image->getRGB().getMat();
		const unsigned int imageWidth = image->getSize().getWidth();   // (unused below)
		const unsigned int imageHeight = image->getSize().getHeight(); // (unused below)
		const unsigned int imageArea = image->getSize().getArea();
		const cv::Size &imageSize = image->getSize().getSize();

		// Min and max values (channels assumed normalized to [0, 1],
		// given the initial bounds below — TODO confirm against Image)
		double valueMinR = 1;
		double valueMinG = 1;
		double valueMinB = 1;
		double valueMaxR = 0;
		double valueMaxG = 0;
		double valueMaxB = 0;

		// Find min and max values (for auto contrast)
		#pragma omp parallel for schedule(static) \
        reduction(min:valueMinR), \
        reduction(min:valueMinG), \
        reduction(min:valueMinB), \
        reduction(max:valueMaxR), \
        reduction(max:valueMaxG), \
        reduction(max:valueMaxB)
		for (unsigned int i = 0; i < imageArea; i++) {
			const unsigned int index = i * 3; // interleaved RGB triplets
			const double colorR = imageData[index + 0];
			const double colorG = imageData[index + 1];
			const double colorB = imageData[index + 2];
			valueMinR = colorR < valueMinR ? colorR : valueMinR;
			valueMinG = colorG < valueMinG ? colorG : valueMinG;
			valueMinB = colorB < valueMinB ? colorB : valueMinB;
			valueMaxR = colorR > valueMaxR ? colorR : valueMaxR;
			valueMaxG = colorG > valueMaxG ? colorG : valueMaxG;
			valueMaxB = colorB > valueMaxB ? colorB : valueMaxB;
		}

		/**************************************************
		 * We create a greyscale image and detect the scene
		 * contours using Canny-Edge detection. After we
		 * found the contours, we filter out the color
		 * squares of the color chart
		 **************************************************/

		// Create greyscale image
		cv::Mat scene(imageSize, CV_8UC1, cv::Scalar(0));
		unsigned char *sceneData = scene.data;

		// Calculate greyscale image (auto contrast)
		// NOTE(review): divides by (max - min); a perfectly flat channel
		// would divide by zero — presumably impossible for real captures.
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < imageArea; i++) {
			const unsigned int index = i * 3;
			const double colorR = (imageData[index + 0] - valueMinR) / (valueMaxR - valueMinR);
			const double colorG = (imageData[index + 1] - valueMinG) / (valueMaxG - valueMinG);
			const double colorB = (imageData[index + 2] - valueMinB) / (valueMaxB - valueMinB);
			// Rec. 709 luma weights
			const double luma = (0.2126 * colorR) + (0.7152 * colorG) + (0.0722 * colorB);
			sceneData[i] = (unsigned char) lround(luma * 255.0);
		}

		// Find contours from edges
		cv::GaussianBlur(scene, scene, cv::Size(3, 3), 0, 0);
		cv::Canny(scene, scene, 10, 30, 3);
		std::vector<std::vector<cv::Point> > contours;
		cv::findContours(scene, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
		scene.release();

		// Check if we found any contours
		const unsigned int contoursCount = (unsigned int) contours.size();
		if (contoursCount > 0) {

			// Array that will contain the matched rotated rectangles
			// and the original corner points of the contour. We will
			// need this data to calculate the position of the color chart.
			std::vector<cv::Point> points;
			std::vector<cv::RotatedRect> rectangles;
			double rectanglesArea = 0;
			double rectanglesRatio = 0;

			// Iterate over contours
			for (unsigned int i = 0; i < contoursCount; i++) {

				// Fetch contour, calculate perimeter
				const cv::Mat countour(contours[i]);
				const double contourLength = cv::arcLength(countour, true);

				// Approximate the contour to a polygon
				// (tolerance is 2% of the perimeter)
				std::vector<cv::Point> approx;
				cv::approxPolyDP(countour, approx, contourLength * 0.02, true);

				// If the poly has 4 sides and is convex then it could be a match
				if (approx.size() == 4 && cv::isContourConvex(approx)) {

					// Fetch the rectangle area from the match
					const cv::RotatedRect rectangle = cv::minAreaRect(approx);
					const double rectangleArea = rectangle.size.width * rectangle.size.height;
					const double rectangleRatio = rectangle.size.width / rectangle.size.height;

					// Check if the shape is rectangular enough
					if (rectangleArea >= COLOR_CHART_DETECT_AREA_MIN &&
						rectangleArea <= COLOR_CHART_DETECT_AREA_MAX &&
						rectangleRatio >= COLOR_CHART_DETECT_RATIO_MIN &&
						rectangleRatio <= COLOR_CHART_DETECT_RATIO_MAX) {

						// Store rectangles points (need them later for area calculation)
						points.push_back(approx[0]);
						points.push_back(approx[1]);
						points.push_back(approx[2]);
						points.push_back(approx[3]);
						rectangles.push_back(rectangle);
						rectanglesArea += rectangleArea;
						rectanglesRatio += rectangleRatio;
					}
				}
			}

			// Check if we found any rectangles
			const unsigned int rectanglesCount = (unsigned int) rectangles.size();
			if (rectanglesCount > 0) {

				// Calculate thresholds: keep only rectangles close to the
				// average candidate area/ratio (rejects outliers)
				const double rectangleAreaMin = (rectanglesArea / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MIN;
				const double rectangleAreaMax = (rectanglesArea / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MAX;
				const double rectangleRatioMin = (rectanglesRatio / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MIN;
				const double rectangleRatioMax = (rectanglesRatio / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MAX;

				// Final points
				std::vector<cv::Point> filtered;

				// Iterate over rectangles
				for (unsigned int i = 0; i < rectanglesCount; i++) {

					// Fetch rectangle and calculate size and ratio
					const cv::RotatedRect rectangle = rectangles[i];
					const double rectangleArea = rectangle.size.width * rectangle.size.height;
					const double rectangleRatio = rectangle.size.width / rectangle.size.height;

					// Check if the rectangle fits the average rectangle
					if (rectangleArea >= rectangleAreaMin &&
						rectangleArea <= rectangleAreaMax &&
						rectangleRatio >= rectangleRatioMin &&
						rectangleRatio <= rectangleRatioMax) {

						// If so, then add these points to the valid points list
						const unsigned int index = i * 4;
						filtered.push_back(points[index + 0]);
						filtered.push_back(points[index + 1]);
						filtered.push_back(points[index + 2]);
						filtered.push_back(points[index + 3]);
					}
				}

				/**************************************************
				 * At this point we have filtered the contours to
				 * a list of valid points that are the corners of
				 * the small color squares inside the color chart.
				 * In the next step we need to calculate the
				 * rectangle that forms the actual color chart area.
				 **************************************************/

				// Check if we found any final points
				const unsigned int filteredCount = (unsigned int) filtered.size();
				if (filteredCount > 0) {

					// Find the center point (mean of all corner points)
					unsigned int totalX = 0;
					unsigned int totalY = 0;
					for (unsigned int i = 0; i < filteredCount; i++) {
						totalX += filtered[i].x;
						totalY += filtered[i].y;
					}
					const double centerX = (double) totalX / filteredCount;
					const double centerY = (double) totalY / filteredCount;
					const cv::Point center(
						(unsigned int) lround(centerX),
						(unsigned int) lround(centerY)
					);

					// Sort points based on distance from center (from far to near)
					std::sort(filtered.begin(), filtered.end(), [centerX, centerY](const cv::Point a, const cv::Point b) {
						const double distanceAX = centerX - a.x;
						const double distanceAY = centerY - a.y;
						const double distanceBX = centerX - b.x;
						const double distanceBY = centerY - b.y;
						const double distanceA = std::sqrt(distanceAX * distanceAX + distanceAY * distanceAY);
						const double distanceB = std::sqrt(distanceBX * distanceBX + distanceBY * distanceBY);
						return distanceA > distanceB;
					});

					// Create final corner points
					cv::Point pointA = center;
					cv::Point pointB = center;
					cv::Point pointC = center;
					cv::Point pointD = center;

					// Find corners: A = top-left, B = top-right,
					// C = bottom-right, D = bottom-left, relative to center.
					// NOTE(review): this assumes the chart is roughly
					// axis-aligned in the capture — confirm for rotated charts.
					for (unsigned int i = 0; i < filteredCount; i++) {
						const cv::Point point = filtered[i];
						if (point.x < pointA.x && point.y < pointA.y) pointA = point;
						if (point.x > pointB.x && point.y < pointB.y) pointB = point;
						if (point.x > pointC.x && point.y > pointC.y) pointC = point;
						if (point.x < pointD.x && point.y > pointD.y) pointD = point;
						// cv::circle(imageMat, cv::Point(filtered[i].x, filtered[i].y), 5, cv::Scalar(0, 1, 0), 2, 8);
					}

					// Debugging helpers, kept for reference:
					// cv::circle(imageMat, pointA, 20, cv::Scalar(1, 0, 0), 2, 8);
					// cv::circle(imageMat, pointB, 20, cv::Scalar(1, 0, 0), 2, 8);
					// cv::circle(imageMat, pointC, 20, cv::Scalar(1, 0, 0), 2, 8);
					// cv::circle(imageMat, pointD, 20, cv::Scalar(1, 0, 0), 2, 8);
					// cv::circle(imageMat, center, 10, cv::Scalar(0, 0, 1), 2, 8);
					// cv::imshow("image", imageMat);
					// cv::waitKey(0);

					// Store corners
					const cv::Point2f corners[4] = {
						cv::Point2f(pointA.x, pointA.y),
						cv::Point2f(pointB.x, pointB.y),
						cv::Point2f(pointC.x, pointC.y),
						cv::Point2f(pointD.x, pointD.y)
					};

					// Store output size
					const cv::Size outputSize(COLOR_CHART_INSIDE_WIDTH, COLOR_CHART_INSIDE_HEIGHT);
					const cv::Point2f outputRect[4] = {
						cv::Point2f(0, 0),
						cv::Point2f(outputSize.width, 0),
						cv::Point2f(outputSize.width, outputSize.height),
						cv::Point2f(0, outputSize.height)
					};

					// Calculate and apply perspective transform
					cv::Mat output;
					cv::Mat transform = cv::getPerspectiveTransform(corners, outputRect);
					cv::warpPerspective(imageMat, output, transform, outputSize, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);

					/**************************************************
					 * Right now we have a output image with the cropped
					 * color chart (inside area). Now we can iterate
					 * over the color squares and store the mean value.
					 **************************************************/

					// Iterate over patches
					for (unsigned int col = 0; col < COLOR_CHART_COLS; col++) {
						for (unsigned int row = 0; row < COLOR_CHART_ROWS; row++) {

							// Calculate position of patch
							const unsigned int x = col * (COLOR_CHART_PATCH_WIDTH + COLOR_CHART_PATCH_PADDING);
							const unsigned int y = row * (COLOR_CHART_PATCH_HEIGHT + COLOR_CHART_PATCH_PADDING);
							// NOTE(review): column-major patch indexing —
							// confirm it matches ColorChart::getPatches() order.
							const unsigned int index = COLOR_CHART_ROWS * col + row;

							// Rectangle area of the patch, inset by 10px on
							// each side to avoid sampling patch borders
							const cv::Rect rect(
								x + 10, y + 10,
								COLOR_CHART_PATCH_WIDTH - 20,
								COLOR_CHART_PATCH_HEIGHT - 20
							);

							// Calculate mean color from area
							const cv::Scalar color = cv::mean(cv::Mat(output, rect));

							// Set color
							patches[index]->setRGB(
								color[0],
								color[1],
								color[2]
							);
						}
					}
				}
			}
		}
		return chart;
	}
}

#endif // CAPTURE3_COLOR_CHART_UTILS_H
GB_unop__identity_fc64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc64_uint64
// op(A') function:  GB_unop_tran__identity_fc64_uint64

// C type:   GxB_FC64_t
// A type:   uint64_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting: uint64 -> double complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense apply: casts each of the anz entries of Ax into Cx in parallel.

GrB_Info GB_unop_apply__identity_fc64_uint64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        uint64_t aij = Ax [p] ;
        GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the textual inclusion of
// GB_unop_transpose.c, driven by the GB_* macros defined above.

GrB_Info GB_unop_tran__identity_fc64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dia.h
#ifndef __DIA_H #define __DIA_H #include <algorithm> #include "complex_ops.h" #include "utils.h" #include "numpy/ndarraytypes.h" template <typename I, typename T1, typename T2> void dia_matvec_noomp_contig(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const T2 x[], T2 y[]) { if(overwrite_y){ for(I i = 0; i < n_row; i++){ y[i] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_row = x + j_start; T2 * y_row = y + i_start; for(I n = 0; n < N; n++){ y_row[n] += (a * diag[n]) * x_row[n]; } } } template <typename I, typename T1, typename T2> void dia_matvec_noomp_strided(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(overwrite_y){ for(I i = 0; i < n_row; i++){ y[i] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_row = x + j_start * x_stride; T2 * y_row = y + i_start * y_stride; for(I n = 0; n < N; n++){ y_row[n * y_stride] += (a * diag[n]) * x_row[n * x_stride]; } } } template <typename I, typename T1, typename T2> void dia_matvecs_noomp_strided(const bool overwrite_y, const I n_row, const I n_col, const npy_intp n_vecs, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_row, const npy_intp 
x_stride_col, const T2 x[], const npy_intp y_stride_row, const npy_intp y_stride_col, T2 y[]) { if(overwrite_y){ const npy_intp n = (npy_intp)n_row * n_vecs; for(npy_intp i = 0; i < n; i++){ y[i] = 0; } } if(y_stride_col < y_stride_row){ for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_row = x + j_start * x_stride_row; T2 * y_row = y + i_start * y_stride_row; for(I n = 0; n < N; n++){ axpy_strided((npy_intp)n_vecs,T2(a * diag[n]),x_stride_col,x_row,y_stride_col,y_row); x_row += x_stride_row; y_row += y_stride_row; } } } else{ for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_start = x + j_start * x_stride_row; T2 * y_start = y + i_start * y_stride_row; for(I m=0;m<n_vecs;m++){ const T2 * x_row = x_start; T2 * y_row = y_start; for(I n = 0; n < N; n++){ (*y_row) += (a * diag[n]) * (*x_row); x_row += x_stride_row; y_row += y_stride_row; } y_start += y_stride_col; x_start += x_stride_col; } } } } #if defined(_OPENMP) #include "csrmv_merge.h" #include "openmp.h" template <typename I, typename T1, typename T2> void dia_matvec_omp_contig(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const T2 x[], T2 y[]) { #pragma omp parallel { if(overwrite_y){ #pragma omp for schedule(static) for(I n=0;n<n_row;n++){ y[n] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); 
const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + i*L + j_start; const T2 * x_row = x + j_start; T2 * y_row = y + i_start; #pragma omp for schedule(static) for(I n=0;n<N;n++){ y_row[n] += (a * diag[n]) * x_row[n]; } } } } template <typename I, typename T1, typename T2> void dia_matvec_omp_strided(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { #pragma omp parallel { if(overwrite_y){ #pragma omp for schedule(static) for(I n=0;n<n_row;n++){ y[n * y_stride] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + i*L + j_start; const T2 * x_row = x + j_start * x_stride; T2 * y_row = y + i_start * y_stride; #pragma omp for schedule(static) for(I n=0;n<N;n++){ y_row[n * y_stride] += (a * diag[n]) * x_row[n * x_stride]; } } } } template <typename I, typename T1, typename T2> inline void dia_matvecs_omp_strided(const bool overwrite_y, const I n_row, const I n_col, const npy_intp n_vecs, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_row, const npy_intp x_stride_col, const T2 x[], const npy_intp y_stride_row, const npy_intp y_stride_col, T2 y[]) { dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y); } #else template <typename I, typename T1, typename T2> inline void dia_matvec_omp_contig(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 
diags[], const T1 a, const T2 x[], T2 y[]) { dia_matvec_noomp_contig(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x,y); } template <typename I, typename T1, typename T2> inline void dia_matvec_omp_strided(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { dia_matvec_noomp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,y_stride,y); } template <typename I, typename T1, typename T2> inline void dia_matvecs_omp_strided(const bool overwrite_y, const I n_row, const I n_col, const npy_intp n_vecs, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_row, const npy_intp x_stride_col, const T2 x[], const npy_intp y_stride_row, const npy_intp y_stride_col, T2 y[]) { dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y); } #endif template <typename I, typename T1, typename T2> void dia_matvec_omp(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_bytes, const T2 x[], const npy_intp y_stride_bytes, T2 y[]) { const npy_intp x_stride = x_stride_bytes/sizeof(T2); const npy_intp y_stride = y_stride_bytes/sizeof(T2); if(y_stride==1){ if(x_stride==1){ dia_matvec_omp_contig(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x,y); } else{ dia_matvec_omp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,1,y); } } else{ if(x_stride==1){ dia_matvec_omp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,1,x,y_stride,y); } else{ dia_matvec_omp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,y_stride,y); } } } template <typename I, typename T1, typename T2> void dia_matvec_noomp(const bool overwrite_y, const I n_row, const I n_col, 
const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_bytes, const T2 x[], const npy_intp y_stride_bytes, T2 y[]) { const npy_intp x_stride = x_stride_bytes/sizeof(T2); const npy_intp y_stride = y_stride_bytes/sizeof(T2); if(y_stride==1){ if(x_stride==1){ dia_matvec_noomp_contig(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x,y); } else{ dia_matvec_noomp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,1,y); } } else{ if(x_stride==1){ dia_matvec_noomp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,1,x,y_stride,y); } else{ dia_matvec_noomp_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,y_stride,y); } } } template<typename I, typename T1,typename T2> inline void dia_matvecs_omp(const bool overwrite_y, const I n_row, const I n_col, const npy_intp n_vecs, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_row_byte, const npy_intp x_stride_col_byte, const T2 x[], const npy_intp y_stride_row_byte, const npy_intp y_stride_col_byte, T2 y[]) { const npy_intp y_stride_row = y_stride_row_byte/sizeof(T2); const npy_intp y_stride_col = y_stride_col_byte/sizeof(T2); const npy_intp x_stride_row = x_stride_row_byte/sizeof(T2); const npy_intp x_stride_col = x_stride_col_byte/sizeof(T2); if(y_stride_col==1){ if(x_stride_col==1){ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,1,x,y_stride_row,1,y); } else if(x_stride_row==1){ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,1,x_stride_col,x,y_stride_row,1,y); } else{ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,1,y); } } else if(y_stride_row==1){ if(x_stride_col==1){ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,1,x,1,y_stride_col,y); } else if(x_stride_row==1){ 
dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,1,x_stride_col,x,1,y_stride_col,y); } else{ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,1,y_stride_col,y); } } else{ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y); } } template<typename I, typename T1,typename T2> inline void dia_matvecs_noomp(const bool overwrite_y, const I n_row, const I n_col, const npy_intp n_vecs, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride_row_byte, const npy_intp x_stride_col_byte, const T2 x[], const npy_intp y_stride_row_byte, const npy_intp y_stride_col_byte, T2 y[]) { const npy_intp y_stride_row = y_stride_row_byte/sizeof(T2); const npy_intp y_stride_col = y_stride_col_byte/sizeof(T2); const npy_intp x_stride_row = x_stride_row_byte/sizeof(T2); const npy_intp x_stride_col = x_stride_col_byte/sizeof(T2); if(y_stride_col==1){ if(x_stride_col==1){ dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,1,x,y_stride_row,1,y); } else if(x_stride_row==1){ dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,1,x_stride_col,x,y_stride_row,1,y); } else{ dia_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,1,y); } } else if(y_stride_row==1){ if(x_stride_col==1){ dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,1,x,1,y_stride_col,y); } else if(x_stride_row==1){ dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,1,x_stride_col,x,1,y_stride_col,y); } else{ dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,1,y_stride_col,y); } } else{ 
dia_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,n_diags,L,offsets,diags,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y); } } #endif
Example_nthrs_dynamic.1.c
/* * @@name: nthrs_dynamic.1c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success */ #include <omp.h> int main() { omp_set_dynamic(0); #pragma omp parallel num_threads(10) { /* do work here */ } return 0; }
main.c
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include <fcntl.h>
#include <unistd.h>

#define N1 2
#define N2 4
#define N3 8
#define N4 16

int size = 0;   /* element count, padded up to a power of two by loadArray */
int trueSize;   /* number of values actually read from the input file */

/*
 * Read whitespace-separated integers from the file named in argv[1] into a
 * heap array and zero-pad it to the next power of two (the scan below
 * requires a power-of-two length).  Returns the (re)allocated array.
 * Exits with an error message if the file cannot be opened or is empty.
 */
int* loadArray(int* arr, char** argv, int N){
    (void)N;  /* kept for interface compatibility; thread count is used later */
    FILE* f = fopen(argv[1], "r");
    if(f == NULL){                       /* FIX: fopen result was unchecked */
        fprintf(stderr, "Could not open %s\n", argv[1]);
        exit(EXIT_FAILURE);
    }
    int num;
    arr = (int*)malloc(sizeof(int) * 1);
    while(fscanf(f, "%d", &num) == 1){
        arr[size] = num;
        size++;
        arr = realloc(arr, sizeof(int) * (size + 1));
    }
    fclose(f);                           /* BUG FIX: file handle used to leak */
    if(size == 0){                       /* FIX: log2(0) below is undefined */
        fprintf(stderr, "Input file contains no integers\n");
        exit(EXIT_FAILURE);
    }
    trueSize = size;  /* BUG FIX: was size + 1, printing one padding zero too many */
    int n = (int)ceil(log2(size));
    int pow = 1<<n;
    arr = realloc(arr, sizeof(int) * pow);
    for(; size < pow; size++){
        arr[size] = 0;                   /* zero is the identity for the scan */
    }
    return arr;
}

/* Utility function to print the first trueSize entries of an array. */
void printArray(int* arr){  /* BUG FIX: had no return type (implicit int is invalid C99+) */
    for(int i = 0; i < trueSize; i++){
        printf("%d ", arr[i]);
    }
    printf("\n");
}

/*
 * Work-efficient (Blelloch) parallel scan using N OpenMP threads, shifted
 * to an inclusive prefix sum.  size (global) must be a power of two.
 * Returns a newly allocated array of length size; arr is modified in place.
 */
int* prefix(int* arr, int N){
    int last = arr[size - 1];  /* saved to rebuild the inclusive total */
    omp_set_num_threads(N);
    int p = 1;
    int p_ = (int)log2(size);
    /* up-sweep: build partial sums in place over an implicit binary tree */
    for(int d = 0; d <= p_ - 1; d++){
        p *= 2;
        #pragma omp parallel shared(arr, p)
        {
            #pragma omp for
            for(int i = 0; i <= (size - 1); i += p){
                arr[i + p - 1] = arr[i + p/2 - 1] + arr[i + p - 1];
            }
        }
    }
    arr[size - 1] = 0;
    /* down-sweep: push partial sums back down the tree */
    for(int d = p_ - 1; d >= 0; d--){
        p /= 2;
        #pragma omp parallel shared(arr, p)
        {
            #pragma omp for
            for(int i = 0; i <= (size - 1); i += 2 * p){
                /* BUG FIX: temp was a single shared variable, creating a
                 * data race between threads; it must be private per
                 * iteration, so declare it inside the loop body. */
                int temp = arr[i + p - 1];
                arr[i + p - 1] = arr[i + 2 * p - 1];
                arr[i + 2 * p - 1] = temp + arr[i + 2 * p - 1];
            }
        }
    }
    /* shift the exclusive scan left by one to make it inclusive */
    int* prefSum = (int*)malloc(sizeof(int) * size);
    #pragma omp parallel shared(prefSum)
    {
        #pragma omp for
        for(int i = 0; i < size - 1; i++){
            prefSum[i] = arr[i + 1];
        }
    }
    if(size >= 2){
        prefSum[size - 1] = prefSum[size - 2] + last;
    } else {
        prefSum[0] = last;  /* BUG FIX: size == 1 used to read prefSum[-1] */
    }
    return prefSum;
}

/*
 * Stream compaction: gather the nonzero entries of A to the front of a new
 * array.  A 0/1 presence mask is prefix-summed to give each surviving
 * value its destination slot.  Returns a newly allocated array of length
 * size (unused tail entries are zero).
 */
int* compact(int* A, int N){
    int* A_ = (int*)calloc(sizeof(int), size);
    int* Aux;   /* FIX: was calloc'd here and immediately leaked when
                 * overwritten by the prefix() result below */
    int* B  = (int*)calloc(sizeof(int), size);
    omp_set_num_threads(N);
    #pragma omp parallel shared(A, A_)
    {
        #pragma omp for
        for(int i = 0; i < size; i++){
            if(A[i] != 0){
                A_[i] = 1;  /* adding 1 wherever data is present */
            }
        }
    }
    /* inclusive prefix sum of the mask = 1-based destination indices */
    Aux = prefix(A_, N);
    #pragma omp parallel shared(A, B, Aux)
    {
        #pragma omp for
        for(int i = 0; i < size; i++){
            if(A[i] != 0){
                B[Aux[i] - 1] = A[i];
            }
        }
    }
    free(A_);   /* FIX: intermediate buffers used to leak */
    free(Aux);
    return B;
}

int main(int argc, char* argv[]){
    if(argc == 1){
        printf("Please supply the file whose data needs to be compacted\n");
    }else if(argc > 2){
        printf("Can compact data of only one file at a time\n");
    }else{
        int* arr = NULL;  /* FIX: was passed to loadArray uninitialized */
        int N;
        printf("Enter the number of threads you want : ");
        if(scanf("%d", &N) != 1 || N <= 0){  /* FIX: scanf result was unchecked */
            fprintf(stderr, "Invalid thread count\n");
            return EXIT_FAILURE;
        }
        arr = loadArray(arr, argv, N);
        int* b = compact(arr, N);
        /* truncate the output file, then redirect stdout into it */
        FILE* file = fopen("Compacted_Array.txt", "w");
        if(file != NULL) fclose(file);   /* FIX: fclose(NULL) on open failure */
        int f = open("Compacted_Array.txt", O_WRONLY | O_CREAT, 0777);
        dup2(f, STDOUT_FILENO);
        close(f);
        printArray(b);
        free(arr);
        free(b);
    }
}
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantumScale*alpha * QuantumScale*beta; ** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also define that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! 
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { double SaSca; if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); SaSca=Sa*PerceptibleReciprocal(Sca); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. 
Otherwise use Intenisty only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = Sca + Dca */ return(Sca+Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*(p->red*Sa+q->red*Da); composite->green=gamma*(p->green*Sa+q->green*Da); composite->blue=gamma*(p->blue*Sa+q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*(p->index*Sa+q->index*Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: 
f(Sc,Dc) = Sc + Dc - 1 */ return(Sca+Dca-Sa*Da); } static inline void CompositeLinearBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType LinearLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Previous formula, was only valid for fully-opaque images. */ return(Dca+2*Sca-1.0); #else /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ return((Sca-Sa)*Da+Sca+Dca); #endif } static inline void CompositeLinearLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Mathematics(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da, const GeometryInfo *geometry_info) { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+ geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+ Dca*(1.0-Sa)); } static inline void CompositeMathematics(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo *args, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da,args); composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da,args); composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da,args); if (q->colorspace == CMYKColorspace) composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da,args); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args); } } static inline void CompositePlus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { /* NOTE: "Plus" does not use 'over' alpha-blending but uses a special 'plus' form of alph-blending. It is the ONLY mathematical operator to do this. this is what makes it different to the otherwise equivalent "LinearDodge" composition method. Note however that color channels are still effected by the alpha channel as a result of the blending, making it just as useless for independant channel maths, just like all other mathematical composition methods. As such the removal of the 'sync' flag, is still a usful convention. 
The MagickPixelCompositePlus() function is defined in "composite-private.h" so it can also be used for Image Blending. */ MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=p->opacity+q->opacity-QuantumRange; if ( (channel & RedChannel) != 0 ) composite->red=p->red+q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green+q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=p->blue+q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index+q->index; } } static inline MagickRealType Minus(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca, const MagickRealType magick_unused(Da)) { /* Minus Source from Destination f(Sc,Dc) = Sc - Dc */ magick_unreferenced(Da); return(Sca+Dca-2*Dca*Sa); } static inline void CompositeMinus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da); composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da); composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-(Sa-Da)); if ( (channel & RedChannel) != 0 ) composite->red=p->red-q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green-q->green; if ( (channel & BlueChannel) != 0 ) 
composite->blue=p->blue-q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index-q->index; } } static inline MagickRealType ModulusAdd(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { MagickRealType pixel; pixel=p+q; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa)); } static inline void CompositeModulusAdd(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusAdd(p->red,Sa,q->red,Da); composite->green=ModulusAdd(p->green,Sa,q->green,Da); composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusAdd(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusAdd(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,1.0,q->index,1.0); } } static inline MagickRealType ModulusSubtract(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { MagickRealType pixel; pixel=p-q; while (pixel > QuantumRange) 
    pixel-=QuantumRange;  /* wrap the difference back into [0,QuantumRange] */
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

/*
  CompositeModulusSubtract: synced (alpha-aware) or per-channel grayscale
  application of ModulusSubtract.
*/
static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      double
        gamma;

      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
      Da=1.0-QuantumScale*q->opacity;
      gamma = RoundToUnity(Sa+Da-Sa*Da);
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      /* NOTE(review): as in CompositeModulusAdd, this reciprocal is unused
         below -- ModulusSubtract already folds the alphas in. */
      gamma=PerceptibleReciprocal(gamma);
      composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
      composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
      composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
          1.0,QuantumRange-q->opacity,1.0);
      if ( (channel & RedChannel) != 0 )
        composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
      if ( (channel & GreenChannel) != 0 )
        composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
    }
}

/*
  Multiply: multiply blend on premultiplied channel values:
  Sca*Dca + Sca*(1-Da) + Dca*(1-Sa).
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      /* un-premultiply and rescale the blended result back to quanta */
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
      composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
        q->red*Da,Da);
      composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
        q->green*Da,Da);
      composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
        q->blue*Da,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,
          QuantumScale*q->index*Da,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Sa*Da);
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumScale*p->red*q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumScale*p->green*q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumScale*p->blue*q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumScale*p->index*q->index;
    }
}

/*
  Out: keep only the part of the source outside the canvas coverage:
  Sa*p*(1-Da).  The canvas value q is unused.
*/
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);
  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);  /* resulting coverage */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca, const
  MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* canvas fully transparent: source passes through */
  return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+Sca*(2.0*Dca+1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
                 2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen:  A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      Sa*=(MagickRealType) QuantumScale;
      Da*=(MagickRealType) QuantumScale; /* optimization */
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
        MagickEpsilon : gamma);
      composite->red=gamma*Screen(p->red*Sa,q->red*Da);
      composite->green=gamma*Screen(p->green*Sa,q->green*Da);
      composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Screen(p->index*Sa,q->index*Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}

/*
  SoftLight: piecewise soft-light blend on premultiplied values; the three
  branches select on 2*Sca vs Sa and 4*Dca vs Da, with alpha = Dca/Da.
*/
static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  MagickRealType
    alpha,
    beta;

  alpha=Dca*PerceptibleReciprocal(Da);
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if differance larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  /* below threshold: keep the canvas value unchanged */
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ?
                   1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  /* guard against division by zero at the branch boundaries */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Xor: keep the regions covered by exactly one of source/canvas:
  Sca*(1-Da)+Dca*(1-Sa).
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage: convenience wrapper -- composites 'source_image' onto
  'image' at the given offset using the default channel set.
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

/*
  CompositeImageChannel: composite 'composite' onto 'image' at
  (x_offset,y_offset) with the given operator, restricted to 'channel'.
*/
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* work on a private clone of the overlay (source) image */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  (void) SetImageColorspace(source_image,image->colorspace);
  GetMagickPixelPacket(image,&zero);
  /* defaults; individual operators override these below */
  canvas_image=(Image *) NULL;
  amount=0.5;
  canvas_dissolve=1.0;
  clip_to_self=MagickTrue;
  percent_luma=100.0;
  percent_chroma=100.0;
  source_dissolve=1.0;
  threshold=0.05f;
  switch (compose)
  {
    case ClearCompositeOp:
    case SrcCompositeOp:
    case InCompositeOp:
    case SrcInCompositeOp:
    case OutCompositeOp:
    case SrcOutCompositeOp:
    case DstInCompositeOp:
    case DstAtopCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region.
      */
      clip_to_self=MagickFalse;
      break;
    }
    case OverCompositeOp:
    {
      if (image->matte != MagickFalse)
        break;
      if (source_image->matte != MagickFalse)
        break;
      /* both fully opaque: 'over' degenerates to 'copy', fall through */
    }
    case CopyCompositeOp:
    {
      if ((x_offset < 0) || (y_offset < 0))
        break;
      if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns)
        break;
      if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows)
        break;
      /*
        Fast path: the source lies entirely inside the canvas, so each row
        can be copied wholesale with memcpy.
      */
      status=MagickTrue;
      source_view=AcquireVirtualCacheView(source_image,exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,image,source_image->rows,1)
#endif
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *source_indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *indexes;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
          source_image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        source_indexes=GetCacheViewVirtualIndexQueue(source_view);
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        (void) memcpy(q,p,source_image->columns*sizeof(*p));
        /* copy the colormap/CMYK index channel when both sides have one */
        if ((indexes != (IndexPacket *) NULL) &&
            (source_indexes != (const IndexPacket *) NULL))
          (void) memcpy(indexes,source_indexes,
            source_image->columns*sizeof(*indexes));
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CompositeImage)
#endif
            proceed=SetImageProgress(image,CompositeImageTag,
              (MagickOffsetType) y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      source_view=DestroyCacheView(source_view);
      image_view=DestroyCacheView(image_view);
      source_image=DestroyImage(source_image);
      return(status);
    }
    case CopyOpacityCompositeOp:
    case ChangeMaskCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region and require an alpha
        channel to exist, to add transparency.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      clip_to_self=MagickFalse;
      break;
    }
    case BlurCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        angle_range,
        angle_start,
        height,
        width;

      ResampleFilter
        *resample_filter;

      SegmentInfo
        blur;

      /*
        Blur Image by resampling.

        Blur Image dictated by an overlay gradient map: X = red_channel;
          Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
      */
      canvas_image=CloneImage(image,0,0,MagickTrue,exception);
      if (canvas_image == (Image *) NULL)
        {
          source_image=DestroyImage(source_image);
          return(MagickFalse);
        }
      /*
        Gather the maximum blur sigma values from user.
      */
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & WidthValue) == 0)
        {
          /* no width given: the geometry is unusable -- warn and abort */
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value);
          source_image=DestroyImage(source_image);
          canvas_image=DestroyImage(canvas_image);
          return(MagickFalse);
        }
      /*
        Users input sigma now needs to be converted to the EWA ellipse
        size.  The filter defaults to a sigma of 0.5 so to make this match
        the users input the ellipse size needs to be doubled.
      */
      width=height=geometry_info.rho*2.0;
      if ((flags & HeightValue) != 0 )
        height=geometry_info.sigma*2.0;
      /*
        default the unrotated ellipse width and height axis vectors
      */
      blur.x1=width;
      blur.x2=0.0;
      blur.y1=0.0;
      blur.y2=height;
      /* rotate vectors if a rotation angle is given */
      if ((flags & XValue) != 0 )
        {
          MagickRealType
            angle;

          angle=DegreesToRadians(geometry_info.xi);
          blur.x1=width*cos(angle);
          blur.x2=width*sin(angle);
          blur.y1=(-height*sin(angle));
          blur.y2=height*cos(angle);
        }
      /* Otherwise lets set a angle range and calculate in the loop */
      angle_start=0.0;
      angle_range=0.0;
      if ((flags & YValue) != 0 )
        {
          angle_start=DegreesToRadians(geometry_info.xi);
          angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
        }
      /*
        Set up a gaussian cylindrical filter for EWA Bluring.

        As the minimum ellipse radius of support*1.0 the EWA algorithm
        can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
        This means that even 'No Blur' will be still a little blurry!

        The solution (as well as the problem of preventing any user
        expert filter settings, is to set our own user settings, then
        restore them afterwards.
      */
      resample_filter=AcquireResampleFilter(image,exception);
      SetResampleFilter(resample_filter,GaussianFilter,1.0);
      /* do the variable blurring of each pixel in image */
      pixel=zero;
      source_view=AcquireVirtualCacheView(source_image,exception);
      canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register PixelPacket
          *magick_restrict r;

        register IndexPacket
          *magick_restrict canvas_indexes;

        register ssize_t
          x;

        /* skip rows that land outside the canvas */
        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
          1,exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
        for (x=0; x < (ssize_t) source_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          /* per-pixel rotation angle taken from the map's blue channel */
          if (fabs(angle_range) > MagickEpsilon)
            {
              MagickRealType
                angle;

              angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p);
              blur.x1=width*cos(angle);
              blur.x2=width*sin(angle);
              blur.y1=(-height*sin(angle));
              blur.y2=height*cos(angle);
            }
#if 0
          if ( x == 10 && y == 60 ) {
            fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",
              blur.x1, blur.x2, blur.y1, blur.y2);
            fprintf(stderr, "scaled by=%lf,%lf\n",
              QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p));
          }
#endif
          /* ellipse axes scaled by the map's red (X) and green (Y) */
          ScaleResampleFilter(resample_filter,
            blur.x1*QuantumScale*GetPixelRed(p),
            blur.y1*QuantumScale*GetPixelGreen(p),
            blur.x2*QuantumScale*GetPixelRed(p),
            blur.y2*QuantumScale*GetPixelGreen(p));
          (void) ResamplePixelColor(resample_filter,(double) x_offset+x,
            (double) y_offset+y,&pixel);
          SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
          p++;
          r++;
        }
        sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
        if (sync == MagickFalse)
          break;
      }
      resample_filter=DestroyResampleFilter(resample_filter);
      source_view=DestroyCacheView(source_view);
      canvas_view=DestroyCacheView(canvas_view);
      source_image=DestroyImage(source_image);
      source_image=canvas_image;  /* blurred clone becomes the new source */
      break;
    }
    case DisplaceCompositeOp:
    case DistortCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view,
        *image_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        horizontal_scale,
        vertical_scale;

      PointInfo
        center,
        offset;

      register IndexPacket
        *magick_restrict canvas_indexes;

      register PixelPacket
        *magick_restrict r;

      /*
        Displace/Distort based on overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,center.x,center.y]]
      */
      canvas_image=CloneImage(image,0,0,MagickTrue,exception);
      if (canvas_image == (Image *) NULL)
        {
          source_image=DestroyImage(source_image);
          return(MagickFalse);
        }
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & (WidthValue | HeightValue)) == 0 )
        {
          /* no scales given: default to half the relevant image size */
          if ((flags & AspectValue) == 0)
            {
              horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
              vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
            }
          else
            {
              horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
              vertical_scale=(MagickRealType) (image->rows-1)/2.0;
            }
        }
      else
        {
          horizontal_scale=geometry_info.rho;
          vertical_scale=geometry_info.sigma;
          if ((flags & PercentValue) != 0)
            {
              if ((flags & AspectValue) == 0)
                {
                  horizontal_scale*=(source_image->columns-1)/200.0;
                  vertical_scale*=(source_image->rows-1)/200.0;
                }
              else
                {
                  horizontal_scale*=(image->columns-1)/200.0;
                  vertical_scale*=(image->rows-1)/200.0;
                }
            }
          if ((flags & HeightValue) == 0)
            vertical_scale=horizontal_scale;
        }
      /*
        Determine fixed center point for absolute distortion map
         Absolute distort ==
           Displace offset relative to a fixed absolute point
           Select that point according to +X+Y user inputs.
           default = center of overlay image
           arg flag '!' = locations/percentage relative to background image
      */
      center.x=(MagickRealType) x_offset;
      center.y=(MagickRealType) y_offset;
      if (compose == DistortCompositeOp)
        {
          if ((flags & XValue) == 0)
            if ((flags & AspectValue) != 0)
              center.x=((MagickRealType) image->columns-1)/2.0;
            else
              center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
                2.0);
          else
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) (x_offset+geometry_info.xi);
            else
              center.x=geometry_info.xi;
          if ((flags & YValue) == 0)
            if ((flags & AspectValue) != 0)
              center.y=((MagickRealType) image->rows-1)/2.0;
            else
              center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
          else
            if ((flags & AspectValue) != 0)
              center.y=geometry_info.psi;
            else
              center.y=(MagickRealType) (y_offset+geometry_info.psi);
        }
      /*
        Shift the pixel offset point as defined by the provided,
        displacement/distortion map.  -- Like a lens...
      */
      pixel=zero;
      image_view=AcquireVirtualCacheView(image,exception);
      source_view=AcquireVirtualCacheView(source_image,exception);
      canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
          1,exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
        for (x=0; x < (ssize_t) source_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          /*
            Displace the offset.
          */
          offset.x=(double) ((horizontal_scale*(GetPixelRed(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
            x : 0));
          offset.y=(double) ((vertical_scale*(GetPixelGreen(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
            y : 0));
          status=InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
            &pixel,exception);
          if (status == MagickFalse)
            break;
          /*
            Mask with the 'invalid pixel mask' in alpha channel.
          */
          pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
            pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p)));
          SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
          p++;
          r++;
        }
        if (x < (ssize_t) source_image->columns)
          break;
        sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
        if (sync == MagickFalse)
          break;
      }
      canvas_view=DestroyCacheView(canvas_view);
      source_view=DestroyCacheView(source_view);
      image_view=DestroyCacheView(image_view);
      source_image=DestroyImage(source_image);
      source_image=canvas_image;  /* displaced clone becomes the new source */
      break;
    }
    case DissolveCompositeOp:
    {
      /*
        Geometry arguments to dissolve factors.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          canvas_dissolve=1.0;
          /* clamp source dissolve to [0,1]; excess spills onto the canvas */
          if ((source_dissolve-MagickEpsilon) < 0.0)
            source_dissolve=0.0;
          if ((source_dissolve+MagickEpsilon) > 1.0)
            {
              canvas_dissolve=2.0-source_dissolve;
              source_dissolve=1.0;
            }
          if ((flags & SigmaValue) != 0)
            canvas_dissolve=geometry_info.sigma/100.0;
          if ((canvas_dissolve-MagickEpsilon) < 0.0)
            canvas_dissolve=0.0;
          clip_to_self=MagickFalse;
          if ((canvas_dissolve+MagickEpsilon) > 1.0 )
            {
              canvas_dissolve=1.0;
              clip_to_self=MagickTrue;
            }
        }
      break;
    }
    case BlendCompositeOp:
    {
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          canvas_dissolve=1.0-source_dissolve;
          if ((flags & SigmaValue) != 0)
            canvas_dissolve=geometry_info.sigma/100.0;
          clip_to_self=MagickFalse;
          if ((canvas_dissolve+MagickEpsilon) > 1.0)
            clip_to_self=MagickTrue;
        }
      break;
    }
    case MathematicsCompositeOp:
    {
      /*
        Just collect the values from "compose:args", setting.
        Unused values are set to zero automagically.

        Arguments are normally a comma separated list, so this probably
        should be changed to some 'general comma list' parser, (with a
        minimum number of values)
      */
      SetGeometryInfo(&geometry_info);
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        (void) ParseGeometry(value,&geometry_info);
      break;
    }
    case ModulateCompositeOp:
    {
      /*
        Determine the luma and chroma scale.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          percent_luma=geometry_info.rho;
          if ((flags & SigmaValue) != 0)
            percent_chroma=geometry_info.sigma;
        }
      break;
    }
    case ThresholdCompositeOp:
    {
      /*
        Determine the amount and threshold.
        This Composition method is deprecated
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          amount=geometry_info.rho;
          threshold=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            threshold=0.05f;
        }
      threshold*=QuantumRange;
      break;
    }
    default:
      break;
  }
  /* artifact overrides for clipping behavior and clamping */
  value=GetImageArtifact(image,"compose:outside-overlay");
  if (value != (const char *) NULL)
    clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse;
  value=GetImageArtifact(image,"compose:clip-to-self");
  if (value != (const char *) NULL)
    clip_to_self=IsMagickTrue(value) != MagickFalse ? MagickTrue : MagickFalse;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsMagickTrue(value);
  /*
    Composite image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateCompositeImage(image,channel,compose,source_image,
    x_offset,y_offset,canvas_dissolve,source_dissolve,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  midpoint=((MagickRealType) QuantumRange+1.0)/2;
  GetMagickPixelPacket(source_image,&zero);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *pixels;

    double
      luma,
      hue,
      chroma,
      sans;

    MagickPixelPacket
      composite,
      canvas,
      source;

    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay
      region.
    */
    pixels=(PixelPacket *) NULL;
    p=(PixelPacket *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        if (x_offset < 0)
          p-=x_offset;  /* skip columns clipped off the left edge */
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    GetMagickPixelPacket(source_image,&source);
    GetMagickPixelPacket(image,&canvas);
    hue=0.0;
    chroma=0.0;
    luma=0.0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q++;
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      canvas.red=(MagickRealType) GetPixelRed(q);
      canvas.green=(MagickRealType) GetPixelGreen(q);
      canvas.blue=(MagickRealType) GetPixelBlue(q);
      if (image->matte != MagickFalse)
        canvas.opacity=(MagickRealType) GetPixelOpacity(q);
      if (image->colorspace == CMYKColorspace)
        canvas.index=(MagickRealType) GetPixelIndex(indexes+x);
      if (image->colorspace == CMYKColorspace)
        {
          /* CMYK channels are stored inverted relative to RGB quanta */
          canvas.red=(MagickRealType) QuantumRange-canvas.red;
          canvas.green=(MagickRealType) QuantumRange-canvas.green;
          canvas.blue=(MagickRealType) QuantumRange-canvas.blue;
          canvas.index=(MagickRealType) QuantumRange-canvas.index;
        }
      /*
        Handle canvas modifications outside overlaid region.
      */
      composite=canvas;
      if ((pixels == (PixelPacket *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          switch (compose)
          {
            case DissolveCompositeOp:
            case BlendCompositeOp:
            {
              composite.opacity=(MagickRealType) (QuantumRange-
                canvas_dissolve*(QuantumRange-composite.opacity));
              break;
            }
            case ClearCompositeOp:
            case SrcCompositeOp:
            {
              CompositeClear(&canvas,&composite);
              break;
            }
            case InCompositeOp:
            case SrcInCompositeOp:
            case OutCompositeOp:
            case SrcOutCompositeOp:
            case DstInCompositeOp:
            case DstAtopCompositeOp:
            case CopyOpacityCompositeOp:
            case ChangeMaskCompositeOp:
            {
              composite.opacity=(MagickRealType) TransparentOpacity;
              break;
            }
            default:
            {
              (void) GetOneVirtualMagickPixel(source_image,x-x_offset,
                y-y_offset,&composite,exception);
              break;
            }
          }
          if (image->colorspace == CMYKColorspace)
            {
              composite.red=(MagickRealType) QuantumRange-composite.red;
              composite.green=(MagickRealType) QuantumRange-composite.green;
              composite.blue=(MagickRealType) QuantumRange-composite.blue;
              composite.index=(MagickRealType) QuantumRange-composite.index;
            }
          SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) :
            ClampToQuantum(composite.red));
          SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) :
            ClampToQuantum(composite.green));
          SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) :
            ClampToQuantum(composite.blue));
          if (image->matte != MagickFalse)
            SetPixelOpacity(q,clamp != MagickFalse ?
              ClampPixel(composite.opacity) :
              ClampToQuantum(composite.opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(indexes+x,clamp != MagickFalse ?
              ClampPixel(composite.index) : ClampToQuantum(composite.index));
          q++;
          continue;
        }
      /*
        Handle normal overlay of source onto canvas.
      */
      source.red=(MagickRealType) GetPixelRed(p);
      source.green=(MagickRealType) GetPixelGreen(p);
      source.blue=(MagickRealType) GetPixelBlue(p);
      if (source_image->matte != MagickFalse)
        source.opacity=(MagickRealType) GetPixelOpacity(p);
      if (source_image->colorspace == CMYKColorspace)
        source.index=(MagickRealType) GetPixelIndex(source_indexes+
          x-x_offset);
      if (source_image->colorspace == CMYKColorspace)
        {
          source.red=(MagickRealType) QuantumRange-source.red;
          source.green=(MagickRealType) QuantumRange-source.green;
          source.blue=(MagickRealType) QuantumRange-source.blue;
          source.index=(MagickRealType) QuantumRange-source.index;
        }
      /* dispatch on the requested composition operator */
      switch (compose)
      {
        /* Duff-Porter Compositions */
        case ClearCompositeOp:
        {
          CompositeClear(&canvas,&composite);
          break;
        }
        case SrcCompositeOp:
        case CopyCompositeOp:
        case ReplaceCompositeOp:
        {
          composite=source;
          break;
        }
        case NoCompositeOp:
        case DstCompositeOp:
          break;
        case OverCompositeOp:
        case SrcOverCompositeOp:
        {
          MagickPixelCompositeOver(&source,source.opacity,&canvas,
            canvas.opacity,&composite);
          break;
        }
        case DstOverCompositeOp:
        {
          MagickPixelCompositeOver(&canvas,canvas.opacity,&source,
            source.opacity,&composite);
          break;
        }
        case SrcInCompositeOp:
        case InCompositeOp:
        {
          CompositeIn(&source,&canvas,&composite);
          break;
        }
        case DstInCompositeOp:
        {
          CompositeIn(&canvas,&source,&composite);
          break;
        }
        case OutCompositeOp:
        case SrcOutCompositeOp:
        {
          CompositeOut(&source,&canvas,&composite);
          break;
        }
        case DstOutCompositeOp:
        {
          CompositeOut(&canvas,&source,&composite);
          break;
        }
        case AtopCompositeOp:
        case SrcAtopCompositeOp:
        {
          CompositeAtop(&source,&canvas,&composite);
          break;
        }
        case DstAtopCompositeOp:
        {
          CompositeAtop(&canvas,&source,&composite);
          break;
        }
        case XorCompositeOp:
        {
          CompositeXor(&source,&canvas,&composite);
          break;
        }
        /* Mathematical Compositions */
        case PlusCompositeOp:
        {
          CompositePlus(&source,&canvas,channel,&composite);
          break;
        }
        case MinusDstCompositeOp:
        {
          CompositeMinus(&source,&canvas,channel,&composite);
          break;
        }
        case MinusSrcCompositeOp:
        {
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { composite.red=(MagickRealType) GetPixelRed(p); composite.opacity=(composite.opacity+canvas.opacity/2); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == 
TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { 
composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture so the caller's texture is never
    modified; TileVirtualPixelMethod makes virtual-pixel reads past the
    texture edge wrap around (used below when tile offsets shift the reads).
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        General path: the compose operator (or the presence of alpha on
        either image) can blend texture with canvas, so tile the texture
        onto the image background with full CompositeImage calls.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized):  the composite
    reduces to a plain pixel copy, so each destination row is filled by
    block-copying one texture row repeatedly.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row (the modulus wraps vertically) and queue one
      destination row.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the last, partial tile on this row */
      (void) memcpy(q,p,width*sizeof(*p));
      /* index channel is copied only when BOTH images carry CMYK indexes */
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          (void) memcpy(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
/* ==== point_relax.c — start of concatenated hypre struct_ls source ==== */
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.37 $
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"

/* depth of the stencil-unrolling in the core kernels;
   this currently cannot be greater than 7 */
#ifdef MAX_DEPTH
#undef MAX_DEPTH
#endif
#define MAX_DEPTH 7

/*--------------------------------------------------------------------------
 * Private data for pointwise relaxation.  A "pointset" is a subset of grid
 * points selected by a stride plus a list of index offsets; hypre_PointRelax
 * sweeps the pointsets in the order given by pointset_ranks.
 *--------------------------------------------------------------------------*/

typedef struct
{
   MPI_Comm                comm;

   double                  tol;       /* tolerance, set =0 for no convergence testing */
   double                  rresnorm;  /* relative residual norm, computed only if tol>0.0 */
   HYPRE_Int               max_iter;
   HYPRE_Int               rel_change;  /* not yet used */
   HYPRE_Int               zero_guess;
   double                  weight;

   HYPRE_Int               num_pointsets;
   HYPRE_Int              *pointset_sizes;
   HYPRE_Int              *pointset_ranks;
   hypre_Index            *pointset_strides;
   hypre_Index           **pointset_indices;

   hypre_StructMatrix     *A;
   hypre_StructVector     *b;
   hypre_StructVector     *x;
   hypre_StructVector     *t;            /* work vector, created lazily in Setup */

   HYPRE_Int               diag_rank;    /* rank of the (0,0,0) stencil entry */

   hypre_ComputePkg      **compute_pkgs; /* one per pointset */

   /* log info (always logged) */
   HYPRE_Int               num_iterations;
   HYPRE_Int               time_index;
   HYPRE_Int               flops;

} hypre_PointRelaxData;

/*--------------------------------------------------------------------------
 * hypre_PointRelaxCreate:  Allocate a relax object and install defaults.
 * The default configuration is a single pointset covering every grid point
 * (stride (1,1,1), one offset (0,0,0)).
 *--------------------------------------------------------------------------*/

void *
hypre_PointRelaxCreate( MPI_Comm  comm )
{
   hypre_PointRelaxData *relax_data;

   hypre_Index           stride;
   hypre_Index           indices[1];

   relax_data = hypre_CTAlloc(hypre_PointRelaxData, 1);

   (relax_data -> comm)       = comm;
   (relax_data -> time_index) = hypre_InitializeTiming("PointRelax");

   /* set defaults */
   (relax_data -> tol)              = 0.0;  /* tol=0 means no convergence testing */
   (relax_data -> rresnorm)         = 0.0;
   (relax_data -> max_iter)         = 1000;
   (relax_data -> rel_change)       = 0;
   (relax_data -> zero_guess)       = 0;
   (relax_data -> weight)           = 1.0;
   (relax_data -> num_pointsets)    = 0;
   (relax_data -> pointset_sizes)   = NULL;
   (relax_data -> pointset_ranks)   = NULL;
   (relax_data -> pointset_strides) = NULL;
   (relax_data -> pointset_indices) = NULL;
   (relax_data -> A)                = NULL;
   (relax_data -> b)                = NULL;
   (relax_data -> x)                = NULL;
   (relax_data -> t)                = NULL;
   (relax_data -> compute_pkgs)     = NULL;

   hypre_SetIndex(stride, 1, 1, 1);
   hypre_SetIndex(indices[0], 0, 0, 0);
   hypre_PointRelaxSetNumPointsets((void *) relax_data, 1);
   hypre_PointRelaxSetPointset((void *) relax_data, 0, 1, stride, indices);

   return (void *) relax_data;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxDestroy:  Free everything Create/Setup allocated and
 * release the references taken on A, b, x and the work vector t
 * (the *Destroy calls pair with the *Ref calls made in Setup/Relax).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxDestroy( void *relax_vdata )
{
   hypre_PointRelaxData *relax_data = relax_vdata;
   HYPRE_Int             i;

   if (relax_data)
   {
      for (i = 0; i < (relax_data -> num_pointsets); i++)
      {
         hypre_TFree(relax_data -> pointset_indices[i]);
      }
      if (relax_data -> compute_pkgs)
      {
         for (i = 0; i < (relax_data -> num_pointsets); i++)
         {
            hypre_ComputePkgDestroy(relax_data -> compute_pkgs[i]);
         }
      }
      hypre_TFree(relax_data -> pointset_sizes);
      hypre_TFree(relax_data -> pointset_ranks);
      hypre_TFree(relax_data -> pointset_strides);
      hypre_TFree(relax_data -> pointset_indices);
      hypre_StructMatrixDestroy(relax_data -> A);
      hypre_StructVectorDestroy(relax_data -> b);
      hypre_StructVectorDestroy(relax_data -> x);
      hypre_StructVectorDestroy(relax_data -> t);
      hypre_TFree(relax_data -> compute_pkgs);
      hypre_FinalizeTiming(relax_data -> time_index);
      hypre_TFree(relax_data);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetup:  Build the work vector, locate the stencil
 * diagonal, and create one compute package per pointset (the communication
 * boxes are projected onto each pointset's stride/offsets).  Also records
 * an estimated flop count for logging.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetup( void               *relax_vdata,
                       hypre_StructMatrix *A,
                       hypre_StructVector *b,
                       hypre_StructVector *x )
{
   hypre_PointRelaxData  *relax_data = relax_vdata;

   HYPRE_Int              num_pointsets    = (relax_data -> num_pointsets);
   HYPRE_Int             *pointset_sizes   = (relax_data -> pointset_sizes);
   hypre_Index           *pointset_strides = (relax_data -> pointset_strides);
   hypre_Index          **pointset_indices = (relax_data -> pointset_indices);

   hypre_StructVector    *t;
   HYPRE_Int              diag_rank;
   hypre_ComputeInfo     *compute_info;
   hypre_ComputePkg     **compute_pkgs;

   hypre_Index            diag_index;
   hypre_IndexRef         stride;
   hypre_IndexRef         index;

   hypre_StructGrid      *grid;
   hypre_StructStencil   *stencil;

   hypre_BoxArrayArray   *orig_indt_boxes;
   hypre_BoxArrayArray   *orig_dept_boxes;
   hypre_BoxArrayArray   *box_aa;
   hypre_BoxArray        *box_a;
   hypre_Box             *box;
   HYPRE_Int              box_aa_size;
   HYPRE_Int              box_a_size;
   hypre_BoxArrayArray   *new_box_aa;
   hypre_BoxArray        *new_box_a;
   hypre_Box             *new_box;

   double                 scale;
   HYPRE_Int              frac;

   HYPRE_Int              i, j, k, p, m, compute_i;

   /*----------------------------------------------------------
    * Set up the temp vector (lazily; same grid and ghost layout as b)
    *----------------------------------------------------------*/

   if ((relax_data -> t) == NULL)
   {
      t = hypre_StructVectorCreate(hypre_StructVectorComm(b),
                                   hypre_StructVectorGrid(b));
      hypre_StructVectorSetNumGhost(t, hypre_StructVectorNumGhost(b));
      hypre_StructVectorInitialize(t);
      hypre_StructVectorAssemble(t);
      (relax_data -> t) = t;
   }

   /*----------------------------------------------------------
    * Find the matrix diagonal (stencil entry at offset (0,0,0))
    *----------------------------------------------------------*/

   grid    = hypre_StructMatrixGrid(A);
   stencil = hypre_StructMatrixStencil(A);

   hypre_SetIndex(diag_index, 0, 0, 0);
   diag_rank = hypre_StructStencilElementRank(stencil, diag_index);

   /*----------------------------------------------------------
    * Set up the compute packages:  for each pointset, replicate the
    * independent/dependent compute boxes once per pointset offset and
    * project each copy onto that offset/stride, so the package visits
    * exactly the points of its pointset.
    *----------------------------------------------------------*/

   compute_pkgs = hypre_CTAlloc(hypre_ComputePkg *, num_pointsets);

   for (p = 0; p < num_pointsets; p++)
   {
      hypre_CreateComputeInfo(grid, stencil, &compute_info);
      orig_indt_boxes = hypre_ComputeInfoIndtBoxes(compute_info);
      orig_dept_boxes = hypre_ComputeInfoDeptBoxes(compute_info);

      stride = pointset_strides[p];

      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         /* pass 0 handles the independent boxes, pass 1 the dependent ones */
         switch(compute_i)
         {
            case 0:
            box_aa = orig_indt_boxes;
            break;

            case 1:
            box_aa = orig_dept_boxes;
            break;
         }
         box_aa_size = hypre_BoxArrayArraySize(box_aa);
         new_box_aa = hypre_BoxArrayArrayCreate(box_aa_size);

         for (i = 0; i < box_aa_size; i++)
         {
            box_a = hypre_BoxArrayArrayBoxArray(box_aa, i);
            box_a_size = hypre_BoxArraySize(box_a);

            new_box_a = hypre_BoxArrayArrayBoxArray(new_box_aa, i);
            hypre_BoxArraySetSize(new_box_a, box_a_size * pointset_sizes[p]);

            k = 0;
            for (m = 0; m < pointset_sizes[p]; m++)
            {
               index = pointset_indices[p][m];

               for (j = 0; j < box_a_size; j++)
               {
                  box = hypre_BoxArrayBox(box_a, j);
                  new_box = hypre_BoxArrayBox(new_box_a, k);

                  hypre_CopyBox(box, new_box);
                  hypre_ProjectBox(new_box, index, stride);

                  k++;
               }
            }
         }

         switch(compute_i)
         {
            case 0:
            hypre_ComputeInfoIndtBoxes(compute_info) = new_box_aa;
            break;

            case 1:
            hypre_ComputeInfoDeptBoxes(compute_info) = new_box_aa;
            break;
         }
      }

      hypre_CopyIndex(stride, hypre_ComputeInfoStride(compute_info));
      hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
                             grid, &compute_pkgs[p]);

      hypre_BoxArrayArrayDestroy(orig_indt_boxes);
      hypre_BoxArrayArrayDestroy(orig_dept_boxes);
   }

   /*----------------------------------------------------------
    * Set up the relax data structure (take references so the operands
    * outlive the caller's handles; released in hypre_PointRelaxDestroy)
    *----------------------------------------------------------*/

   (relax_data -> A) = hypre_StructMatrixRef(A);
   (relax_data -> x) = hypre_StructVectorRef(x);
   (relax_data -> b) = hypre_StructVectorRef(b);
   (relax_data -> diag_rank)    = diag_rank;
   (relax_data -> compute_pkgs) = compute_pkgs;

   /*-----------------------------------------------------
    * Compute flops (logging-only estimate)
    *-----------------------------------------------------*/

   scale = 0.0;
   for (p = 0; p < num_pointsets; p++)
   {
      stride = pointset_strides[p];
      /* NOTE(review): pointset_sizes[p] and frac are HYPRE_Int, so this
         is integer division and truncates before the add — confirm the
         truncation is intended for this estimate */
      frac   = hypre_IndexX(stride);
      frac  *= hypre_IndexY(stride);
      frac  *= hypre_IndexZ(stride);
      scale += (pointset_sizes[p] / frac);
   }
   (relax_data -> flops) = scale * (hypre_StructMatrixGlobalSize(A) +
                                    hypre_StructVectorGlobalSize(x));

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelax:  Run up to max_iter sweeps of weighted pointwise
 * relaxation, one pointset per sub-step (x = w*t + (1-w)*x on the current
 * pointset, where t is the pointwise update built by the core routines).
 * When tol>0 a convergence test ||r||/||b|| < tol is performed after each
 * complete pass over the pointsets.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelax( void               *relax_vdata,
                  hypre_StructMatrix *A,
                  hypre_StructVector *b,
                  hypre_StructVector *x )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   HYPRE_Int             max_iter         = (relax_data -> max_iter);
   HYPRE_Int             zero_guess       = (relax_data -> zero_guess);
   double                weight           = (relax_data -> weight);
   HYPRE_Int             num_pointsets    = (relax_data -> num_pointsets);
   HYPRE_Int            *pointset_ranks   = (relax_data -> pointset_ranks);
   hypre_Index          *pointset_strides = (relax_data -> pointset_strides);
   hypre_StructVector   *t                = (relax_data -> t);
   HYPRE_Int             diag_rank        = (relax_data -> diag_rank);
   hypre_ComputePkg    **compute_pkgs     = (relax_data -> compute_pkgs);
   double                tol              = (relax_data -> tol);
   double                tol2             = tol*tol;

   hypre_ComputePkg     *compute_pkg;
   hypre_CommHandle     *comm_handle;

   hypre_BoxArrayArray  *compute_box_aa;
   hypre_BoxArray       *compute_box_a;
   hypre_Box            *compute_box;

   hypre_Box            *A_data_box;
   hypre_Box            *b_data_box;
   hypre_Box            *x_data_box;
   hypre_Box            *t_data_box;

   double               *Ap;
   double                AAp0;
   double               *bp;
   double               *xp;
   double               *tp;
   void                 *matvec_data = NULL;

   HYPRE_Int             Ai;
   HYPRE_Int             bi;
   HYPRE_Int             xi;
   HYPRE_Int             ti;

   hypre_IndexRef        stride;
   hypre_IndexRef        start;
   hypre_Index           loop_size;

   HYPRE_Int             constant_coefficient;

   HYPRE_Int             iter, p, compute_i, i, j;
   HYPRE_Int             pointset;

   /* bsumsq is assigned and read only on the tol>0.0 paths */
   double                bsumsq, rsumsq;

   /*----------------------------------------------------------
    * Initialize some things and deal with special cases
    *----------------------------------------------------------*/

   hypre_BeginTiming(relax_data -> time_index);

   /* drop the operand references from the previous Setup/solve; fresh
      references to the current operands are taken just below */
   hypre_StructMatrixDestroy(relax_data -> A);
   hypre_StructVectorDestroy(relax_data -> b);
   hypre_StructVectorDestroy(relax_data -> x);
   (relax_data -> A) = hypre_StructMatrixRef(A);
   (relax_data -> x) = hypre_StructVectorRef(x);
   (relax_data -> b) = hypre_StructVectorRef(b);

   (relax_data -> num_iterations) = 0;

   /* if max_iter is zero, return */
   if (max_iter == 0)
   {
      /* if using a zero initial guess, return zero */
      if (zero_guess)
      {
         hypre_StructVectorSetConstantValues(x, 0.0);
      }

      hypre_EndTiming(relax_data -> time_index);
      return hypre_error_flag;
   }

   constant_coefficient = hypre_StructMatrixConstantCoefficient(A);
   if (constant_coefficient) hypre_StructVectorClearBoundGhostValues(x, 0);

   rsumsq = 0.0;
   if ( tol>0.0 ) bsumsq = hypre_StructInnerProd( b, b );

   /*----------------------------------------------------------
    * Do zero_guess iteration:  with x==0 the first pointset sweep
    * reduces to x = b / diag(A) (then scaled by the weight), so no
    * off-diagonal work or communication is needed.
    *----------------------------------------------------------*/

   p    = 0;
   iter = 0;

   if ( tol>0.0)
   {
      /* matvec object used only by the residual-based convergence test */
      matvec_data = hypre_StructMatvecCreate();
      hypre_StructMatvecSetup( matvec_data, A, x );
   }

   if (zero_guess)
   {
      if ( p==0 ) rsumsq = 0.0;
      if (num_pointsets > 1)
      {
         hypre_StructVectorSetConstantValues(x, 0.0);
      }

      pointset    = pointset_ranks[p];
      compute_pkg = compute_pkgs[pointset];
      stride      = pointset_strides[pointset];

      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start  = hypre_BoxIMin(compute_box);
               hypre_BoxGetStrideSize(compute_box, stride, loop_size);

               /* all matrix coefficients are constant */
               if ( constant_coefficient==1 )
               {
                  Ai = hypre_CCBoxIndexRank( A_data_box, start );
                  /* hoist the reciprocal of the single diagonal value */
                  AAp0 = 1/Ap[Ai];
                  hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
                                      b_data_box, start, stride, bi,
                                      x_data_box, start, stride, xi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(bi, xi)
                  {
                     xp[xi] = bp[bi] * AAp0;
                  }
                  hypre_BoxLoop2End(bi, xi);
               }
               /* constant_coefficient 0 (variable) or 2 (variable diagonal
                  only) are the same for the diagonal */
               else
               {
                  hypre_BoxLoop3Begin(hypre_StructVectorDim(x), loop_size,
                                      A_data_box, start, stride, Ai,
                                      b_data_box, start, stride, bi,
                                      x_data_box, start, stride, xi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,bi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop3For(Ai, bi, xi)
                  {
                     xp[xi] = bp[bi] / Ap[Ai];
                  }
                  hypre_BoxLoop3End(Ai, bi, xi);
               }
            }
         }
      }

      if (weight != 1.0)
      {
         hypre_StructScale(weight, x);
      }

      p    = (p + 1) % num_pointsets;
      iter = iter + (p == 0);

      if ( tol>0.0 && p==0 )
      /* ... p==0 here means we've finished going through all the pointsets,
         i.e. this iteration is complete.
         tol>0.0 means to do a convergence test, using tol.
         The test is simply ||r||/||b||<tol, where r=residual, b=r.h.s.,
         unweighted L2 norm */
      {
         hypre_StructCopy( b, t );                    /* t = b */
         hypre_StructMatvecCompute( matvec_data,
                                    -1.0, A, x, 1.0, t ); /* t = - A x + t = - A x + b */
         rsumsq = hypre_StructInnerProd( t, t );      /* <t,t> */
         if ( rsumsq/bsumsq<tol2 ) max_iter = iter;   /* converged; reset max_iter
                                                         to prevent more iterations */
      }
   }

   /*----------------------------------------------------------
    * Do regular iterations
    *----------------------------------------------------------*/

   while (iter < max_iter)
   {
      if ( p==0 ) rsumsq = 0.0;
      pointset    = pointset_ranks[p];
      compute_pkg = compute_pkgs[pointset];
      stride      = pointset_strides[pointset];

      /*hypre_StructCopy(x, t); ... not needed as long as the copy at the end
        of the loop is restricted to the current pointset
        (hypre_relax_copy, hypre_relax_wtx) */

      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               /* start ghost exchange, then work on the boxes that do not
                  depend on it */
               xp = hypre_StructVectorData(x);
               hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               /* wait for the exchange, then finish the dependent boxes */
               hypre_FinalizeIndtComputations(comm_handle);
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
            t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i);

            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);
            tp = hypre_StructVectorBoxData(t, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               /* the core routines accumulate the pointwise update into t */
               if ( constant_coefficient==1 || constant_coefficient==2 )
               {
                  hypre_PointRelax_core12(
                     relax_vdata, A, constant_coefficient,
                     compute_box, bp, xp, tp, i,
                     A_data_box, b_data_box, x_data_box, t_data_box,
                     stride );
               }
               else
               {
                  hypre_PointRelax_core0(
                     relax_vdata, A, constant_coefficient,
                     compute_box, bp, xp, tp, i,
                     A_data_box, b_data_box, x_data_box, t_data_box,
                     stride );
               }

               Ap = hypre_StructMatrixBoxData(A, i, diag_rank);

               if ( constant_coefficient==0 || constant_coefficient==2 )
               /* divide by the variable diagonal */
               {
                  start  = hypre_BoxIMin(compute_box);
                  hypre_BoxGetStrideSize(compute_box, stride, loop_size);
                  hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
                                      A_data_box, start, stride, Ai,
                                      t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,ti) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(Ai, ti)
                  {
                     tp[ti] /= Ap[Ai];
                  }
                  hypre_BoxLoop2End(Ai, ti);
               }
            }
         }
      }

      if (weight != 1.0)
      {
         /* hypre_StructScale((1.0 - weight), x);
            hypre_StructAxpy(weight, t, x);*/
         hypre_relax_wtx( relax_data, pointset, t, x );  /* x=w*t+(1-w)*x on pointset */
      }
      else
      {
         hypre_relax_copy( relax_data, pointset, t, x ); /* x=t on pointset */
         /* hypre_StructCopy(t, x);*/
      }

      p    = (p + 1) % num_pointsets;
      iter = iter + (p == 0);

      if ( tol>0.0 && p==0 )
      /* ... p==0 here means we've finished going through all the pointsets,
         i.e. this iteration is complete.
         tol>0.0 means to do a convergence test, using tol.
         The test is simply ||r||/||b||<tol, where r=residual, b=r.h.s.,
         unweighted L2 norm */
      {
         hypre_StructCopy( b, t );                    /* t = b */
         hypre_StructMatvecCompute( matvec_data,
                                    -1.0, A, x, 1.0, t ); /* t = - A x + t = - A x + b */
         rsumsq = hypre_StructInnerProd( t, t );      /* <t,t> */
         if ( rsumsq/bsumsq<tol2 ) break;
      }
   }

   if ( tol>0.0 )
   {
      hypre_StructMatvecDestroy( matvec_data );
   }

   if ( tol>0.0 ) (relax_data -> rresnorm) = sqrt( rsumsq/bsumsq );
   (relax_data -> num_iterations) = iter;

   /*-----------------------------------------------------------------------
    * Return
    *-----------------------------------------------------------------------*/

   hypre_IncFLOPCount(relax_data -> flops);
   hypre_EndTiming(relax_data -> time_index);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelax_core0:  Inner kernel for constant_coefficient==0 — all
 * coefficients may vary.  The stencil is processed in batches of up to
 * MAX_DEPTH entries, skipping the diagonal entry (the caller divides by
 * the diagonal afterwards).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelax_core0( void               *relax_vdata,
                        hypre_StructMatrix *A,
                        HYPRE_Int           constant_coefficient,
                        hypre_Box          *compute_box,
                        double             *bp,
                        double             *xp,
                        double             *tp,
                        HYPRE_Int           boxarray_id,
                        hypre_Box          *A_data_box,
                        hypre_Box          *b_data_box,
                        hypre_Box          *x_data_box,
                        hypre_Box          *t_data_box,
                        hypre_IndexRef      stride )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   double               *Ap0;
   double               *Ap1;
   double               *Ap2;
   double               *Ap3;
   double               *Ap4;
   double               *Ap5;
   double               *Ap6;
   HYPRE_Int             xoff0;
   HYPRE_Int             xoff1;
   HYPRE_Int             xoff2;
   HYPRE_Int             xoff3;
   HYPRE_Int             xoff4;
   HYPRE_Int             xoff5;
   HYPRE_Int             xoff6;

   hypre_StructStencil  *stencil;
   hypre_Index          *stencil_shape;
   HYPRE_Int             stencil_size;

   HYPRE_Int             diag_rank = (relax_data ->
diag_rank);
   hypre_IndexRef        start;
   hypre_Index           loop_size;

   HYPRE_Int             si, sk, ssi[MAX_DEPTH], depth, k;
   HYPRE_Int             Ai;
   HYPRE_Int             bi;
   HYPRE_Int             xi;
   HYPRE_Int             ti;

   stencil       = hypre_StructMatrixStencil(A);
   stencil_shape = hypre_StructStencilShape(stencil);
   stencil_size  = hypre_StructStencilSize(stencil);

   start = hypre_BoxIMin(compute_box);
   hypre_BoxGetStrideSize(compute_box, stride, loop_size);

   /* initialize t = b on the compute box */
   hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                       b_data_box, start, stride, bi,
                       t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE
#endif
   hypre_BoxLoop2For(bi, ti)
   {
      tp[ti] = bp[bi];
   }
   hypre_BoxLoop2End(bi, ti);

   /* unroll up to depth MAX_DEPTH */
   for (si = 0; si < stencil_size; si += MAX_DEPTH)
   {
      depth = hypre_min(MAX_DEPTH, (stencil_size - si));

      /* collect the next `depth` stencil entries, skipping the diagonal
         (depth shrinks by one if the diagonal falls in this batch) */
      for (k = 0, sk = si; k < depth; sk++)
      {
         if (sk == diag_rank)
         {
            depth--;
         }
         else
         {
            ssi[k] = sk;
            k++;
         }
      }

      /* set up coefficient pointers and x offsets; fall-through on purpose */
      switch(depth)
      {
         case 7:
            Ap6 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[6]);
            xoff6 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[6]]);
         case 6:
            Ap5 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[5]);
            xoff5 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[5]]);
         case 5:
            Ap4 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[4]);
            xoff4 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[4]]);
         case 4:
            Ap3 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[3]);
            xoff3 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[3]]);
         case 3:
            Ap2 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[2]);
            xoff2 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[2]]);
         case 2:
            Ap1 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[1]);
            xoff1 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[1]]);
         case 1:
            Ap0 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[0]);
            xoff0 = hypre_BoxOffsetDistance(
               x_data_box, stencil_shape[ssi[0]]);
         case 0:
            break;
      }

      /* subtract this batch of off-diagonal contributions from t */
      switch(depth)
      {
         case 7:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box,
                                start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1] +
                  Ap2[Ai] * xp[xi + xoff2] +
                  Ap3[Ai] * xp[xi + xoff3] +
                  Ap4[Ai] * xp[xi + xoff4] +
                  Ap5[Ai] * xp[xi + xoff5] +
                  Ap6[Ai] * xp[xi + xoff6];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 6:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1] +
                  Ap2[Ai] * xp[xi + xoff2] +
                  Ap3[Ai] * xp[xi + xoff3] +
                  Ap4[Ai] * xp[xi + xoff4] +
                  Ap5[Ai] * xp[xi + xoff5];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 5:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1] +
                  Ap2[Ai] * xp[xi + xoff2] +
                  Ap3[Ai] * xp[xi + xoff3] +
                  Ap4[Ai] * xp[xi + xoff4];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 4:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1] +
                  Ap2[Ai] * xp[xi + xoff2] +
                  Ap3[Ai] * xp[xi + xoff3];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 3:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1] +
                  Ap2[Ai] * xp[xi + xoff2];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 2:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0] +
                  Ap1[Ai] * xp[xi + xoff1];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 1:
            hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, ti)
            {
               tp[ti] -=
                  Ap0[Ai] * xp[xi + xoff0];
            }
            hypre_BoxLoop3End(Ai, xi, ti);
            break;

         case 0:
            break;
      }
   }

   return hypre_error_flag;
}

/* for constant_coefficient==1 or 2, all offdiagonal coefficients constant
   over space ...*/

/* Computes t = (b - offdiag(A)*x) (for constant_coefficient==2), or the
   same already divided by the constant diagonal (constant_coefficient==1).
   Off-diagonal coefficients are read once per box (index Ai) and folded
   into scalars AAp0..AAp6. */
HYPRE_Int
hypre_PointRelax_core12( void               *relax_vdata,
                         hypre_StructMatrix *A,
                         HYPRE_Int           constant_coefficient,
                         hypre_Box          *compute_box,
                         double             *bp,
                         double             *xp,
                         double             *tp,
                         HYPRE_Int           boxarray_id,
                         hypre_Box          *A_data_box,
                         hypre_Box          *b_data_box,
                         hypre_Box          *x_data_box,
                         hypre_Box          *t_data_box,
                         hypre_IndexRef      stride )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   double               *Apd;
   double               *Ap0;
   double               *Ap1;
   double               *Ap2;
   double               *Ap3;
   double               *Ap4;
   double               *Ap5;
   double               *Ap6;
   double                AAp0;
   double                AAp1;
   double                AAp2;
   double                AAp3;
   double                AAp4;
   double                AAp5;
   double                AAp6;
   double                AApd;
   HYPRE_Int             xoff0;
   HYPRE_Int             xoff1;
   HYPRE_Int             xoff2;
   HYPRE_Int             xoff3;
   HYPRE_Int             xoff4;
   HYPRE_Int             xoff5;
   HYPRE_Int             xoff6;

   hypre_StructStencil
                        *stencil;
   hypre_Index          *stencil_shape;
   HYPRE_Int             stencil_size;
   HYPRE_Int             diag_rank = (relax_data -> diag_rank);
   hypre_IndexRef        start;
   hypre_Index           loop_size;

   HYPRE_Int             si, sk, ssi[MAX_DEPTH], depth, k;
   HYPRE_Int             Ai;
   HYPRE_Int             bi;
   HYPRE_Int             xi;
   HYPRE_Int             ti;

   stencil       = hypre_StructMatrixStencil(A);
   stencil_shape = hypre_StructStencilShape(stencil);
   stencil_size  = hypre_StructStencilSize(stencil);

   start = hypre_BoxIMin(compute_box);
   hypre_BoxGetStrideSize(compute_box, stride, loop_size);

   /* The standard (variable coefficient) algorithm initializes
      tp=bp.  Do it here, but for constant diagonal, also divide by the
      diagonal (and set up AApd for other division-equivalents.
      For a variable diagonal, this diagonal division is done at the end of
      the computation. */
   Ai = hypre_CCBoxIndexRank( A_data_box, start );

   if ( constant_coefficient==1 ) /* constant diagonal */
   {
      Apd = hypre_StructMatrixBoxData(A, boxarray_id, diag_rank);
      AApd = 1/Apd[Ai];
      /* t = b / diag(A), with the division folded into a multiply */
      hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                          b_data_box, start, stride, bi,
                          t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop2For(bi, ti)
      {
         tp[ti] = AApd * bp[bi];
      }
      hypre_BoxLoop2End(bi, ti);
   }
   else /* constant_coefficient==2, variable diagonal */
   {
      AApd = 1;
      /* t = b; the variable-diagonal division happens in the caller */
      hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                          b_data_box, start, stride, bi,
                          t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop2For(bi, ti)
      {
         tp[ti] = bp[bi];
      }
      hypre_BoxLoop2End(bi, ti);
   }

   /* unroll up to depth MAX_DEPTH */
   for (si = 0; si < stencil_size; si += MAX_DEPTH)
   {
      depth = hypre_min(MAX_DEPTH, (stencil_size - si));

      /* collect the next `depth` stencil entries, skipping the diagonal */
      for (k = 0, sk = si; k < depth; sk++)
      {
         if (sk == diag_rank)
         {
            depth--;
         }
         else
         {
            ssi[k] = sk;
            k++;
         }
      }

      /* set up coefficient pointers and x offsets; fall-through on purpose */
      switch(depth)
      {
         case 7:
            Ap6 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[6]);
            xoff6 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[6]]);
         case 6:
            Ap5 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[5]);
            xoff5 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[5]]);
         case 5:
            Ap4 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[4]);
            xoff4 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[4]]);
         case 4:
            Ap3 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[3]);
            xoff3 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[3]]);
         case 3:
            Ap2 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[2]);
            xoff2 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[2]]);
         case 2:
            Ap1 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[1]);
            xoff1 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[1]]);
         case 1:
            Ap0 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[0]);
            xoff0 = hypre_BoxOffsetDistance( x_data_box,
                                             stencil_shape[ssi[0]]);
         case 0:
            break;
      }

      /* subtract this batch of (constant) off-diagonal contributions;
         each coefficient is scaled by AApd (== 1/diag or 1) */
      switch(depth)
      {
         case 7:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            AAp2 = Ap2[Ai]*AApd;
            AAp3 = Ap3[Ai]*AApd;
            AAp4 = Ap4[Ai]*AApd;
            AAp5 = Ap5[Ai]*AApd;
            AAp6 = Ap6[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1] +
                  AAp2 * xp[xi + xoff2] +
                  AAp3 * xp[xi + xoff3] +
                  AAp4 * xp[xi + xoff4] +
                  AAp5 * xp[xi + xoff5] +
                  AAp6 * xp[xi + xoff6];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 6:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            AAp2 = Ap2[Ai]*AApd;
            AAp3 = Ap3[Ai]*AApd;
            AAp4 = Ap4[Ai]*AApd;
            AAp5 = Ap5[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1] +
                  AAp2 * xp[xi + xoff2] +
                  AAp3 * xp[xi + xoff3] +
                  AAp4 * xp[xi + xoff4] +
                  AAp5 * xp[xi + xoff5];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 5:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            AAp2 = Ap2[Ai]*AApd;
            AAp3 = Ap3[Ai]*AApd;
            AAp4 = Ap4[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1] +
                  AAp2 * xp[xi + xoff2] +
                  AAp3 * xp[xi + xoff3] +
                  AAp4 * xp[xi + xoff4];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 4:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            AAp2 = Ap2[Ai]*AApd;
            AAp3 = Ap3[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1] +
                  AAp2 * xp[xi + xoff2] +
                  AAp3 * xp[xi + xoff3];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 3:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            AAp2 = Ap2[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1] +
                  AAp2 * xp[xi + xoff2];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 2:
            AAp0 = Ap0[Ai]*AApd;
            AAp1 = Ap1[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0] +
                  AAp1 * xp[xi + xoff1];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 1:
            AAp0 = Ap0[Ai]*AApd;
            hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               tp[ti] -=
                  AAp0 * xp[xi + xoff0];
            }
            hypre_BoxLoop2End(xi, ti);
            break;

         case 0:
            break;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Set the convergence tolerance (tol <= 0 disables the residual test). */
HYPRE_Int
hypre_PointRelaxSetTol( void   *relax_vdata,
                        double  tol )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   (relax_data -> tol) = tol;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Retrieve the convergence tolerance. */
HYPRE_Int
hypre_PointRelaxGetTol( void   *relax_vdata,
                        double *tol )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   *tol = (relax_data -> tol);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Set the maximum number of relaxation sweeps. */
HYPRE_Int
hypre_PointRelaxSetMaxIter( void      *relax_vdata,
                            HYPRE_Int  max_iter )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   (relax_data -> max_iter) = max_iter;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Retrieve the maximum number of relaxation sweeps. */
HYPRE_Int
hypre_PointRelaxGetMaxIter( void      *relax_vdata,
                            HYPRE_Int *max_iter )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   *max_iter = (relax_data -> max_iter);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Declare whether the initial guess is identically zero, which lets the
   first sweep be simplified. */
HYPRE_Int
hypre_PointRelaxSetZeroGuess( void      *relax_vdata,
                              HYPRE_Int  zero_guess )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   (relax_data -> zero_guess) = zero_guess;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Retrieve the zero-guess flag. */
HYPRE_Int
hypre_PointRelaxGetZeroGuess( void      *relax_vdata,
                              HYPRE_Int *zero_guess )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   *zero_guess = (relax_data -> zero_guess);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Retrieve the number of sweeps performed by the last solve. */
HYPRE_Int
hypre_PointRelaxGetNumIterations( void      *relax_vdata,
                                  HYPRE_Int *num_iterations )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   *num_iterations = (relax_data -> num_iterations);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Set the relaxation weight (1.0 selects the plain copy path). */
HYPRE_Int
hypre_PointRelaxSetWeight( void   *relax_vdata,
                           double  weight )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   (relax_data -> weight) = weight;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Resize the pointset tables; previously stored pointsets are released. */
HYPRE_Int
hypre_PointRelaxSetNumPointsets( void      *relax_vdata,
                                 HYPRE_Int  num_pointsets )
{
   hypre_PointRelaxData *relax_data = relax_vdata;
   HYPRE_Int             i;

   /* free up old pointset memory */
   for (i = 0; i < (relax_data -> num_pointsets); i++)
   {
      hypre_TFree(relax_data -> pointset_indices[i]);
   }
   hypre_TFree(relax_data -> pointset_sizes);
   hypre_TFree(relax_data -> pointset_ranks);
   hypre_TFree(relax_data -> pointset_strides);
   hypre_TFree(relax_data -> pointset_indices);

   /* alloc new pointset memory */
   (relax_data -> num_pointsets)    = num_pointsets;
   (relax_data -> pointset_sizes)   = hypre_TAlloc(HYPRE_Int, num_pointsets);
   (relax_data -> pointset_ranks)   = hypre_TAlloc(HYPRE_Int, num_pointsets);
   (relax_data -> pointset_strides) = hypre_TAlloc(hypre_Index, num_pointsets);
   (relax_data -> pointset_indices) = hypre_TAlloc(hypre_Index *,
                                                   num_pointsets);
   for (i = 0; i < num_pointsets; i++)
   {
      (relax_data -> pointset_sizes[i]) = 0;
      (relax_data -> pointset_ranks[i]) = i;
      (relax_data -> pointset_indices[i]) = NULL;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Define one pointset: its stride and the list of indices it relaxes. */
HYPRE_Int
hypre_PointRelaxSetPointset( void        *relax_vdata,
                             HYPRE_Int    pointset,
                             HYPRE_Int    pointset_size,
                             hypre_Index  pointset_stride,
                             hypre_Index *pointset_indices )
{
   hypre_PointRelaxData *relax_data = relax_vdata;
   HYPRE_Int             i;

   /* free up old pointset memory */
   hypre_TFree(relax_data -> pointset_indices[pointset]);

   /* alloc new pointset memory */
   (relax_data -> pointset_indices[pointset]) =
      hypre_TAlloc(hypre_Index, pointset_size);

   (relax_data -> pointset_sizes[pointset]) = pointset_size;
   hypre_CopyIndex(pointset_stride,
                   (relax_data -> pointset_strides[pointset]));
   for (i = 0; i < pointset_size; i++)
   {
      hypre_CopyIndex(pointset_indices[i],
                      (relax_data -> pointset_indices[pointset][i]));
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Set the order in which pointsets are visited. */
HYPRE_Int
hypre_PointRelaxSetPointsetRank( void      *relax_vdata,
                                 HYPRE_Int  pointset,
                                 HYPRE_Int  pointset_rank )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   (relax_data -> pointset_ranks[pointset]) = pointset_rank;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Supply an externally owned temporary vector; the old one is released
   and a new reference is taken. */
HYPRE_Int
hypre_PointRelaxSetTempVec( void               *relax_vdata,
                            hypre_StructVector *t )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   hypre_StructVectorDestroy(relax_data -> t);
   (relax_data -> t) = hypre_StructVectorRef(t);
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Retrieve ||r||/||b|| as computed by the last converged solve. */
HYPRE_Int
hypre_PointRelaxGetFinalRelativeResidualNorm( void   *relax_vdata,
                                              double *norm )
{
   hypre_PointRelaxData *relax_data = relax_vdata;

   *norm = relax_data -> rresnorm;
   return 0;
}

/*--------------------------------------------------------------------------
 * Special vector operation for use in hypre_PointRelax -
 * convex combination of vectors on specified pointsets.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_relax_wtx( void *relax_vdata, HYPRE_Int pointset,
                           hypre_StructVector *t, hypre_StructVector *x )
/* Sets x to a convex combination of x and t,  x = weight * t + (1-weight) * x,
   but only in the specified pointset */
{
   hypre_PointRelaxData *relax_data = relax_vdata;
   double                weight = (relax_data -> weight);
   hypre_Index          *pointset_strides = (relax_data -> pointset_strides);
   hypre_ComputePkg    **compute_pkgs = (relax_data -> compute_pkgs);
   hypre_ComputePkg     *compute_pkg;

   hypre_IndexRef        stride;
   hypre_IndexRef        start;
   hypre_Index           loop_size;

   double                weightc = 1 - weight;
   double               *xp, *tp;
   HYPRE_Int             compute_i, i, j, xi, ti;

   hypre_BoxArrayArray  *compute_box_aa;
   hypre_BoxArray       *compute_box_a;
   hypre_Box            *compute_box;
   hypre_Box            *x_data_box;
   hypre_Box            *t_data_box;

   compute_pkg = compute_pkgs[pointset];
   stride = pointset_strides[pointset];

   /* visit both the independent and the dependent boxes of the pointset */
   for (compute_i = 0; compute_i < 2; compute_i++)
   {
      switch(compute_i)
      {
         case 0:
         {
            compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
         }
         break;

         case 1:
         {
            compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
         }
         break;
      }

      hypre_ForBoxArrayI(i, compute_box_aa)
      {
         compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

         x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
         t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i);

         xp = hypre_StructVectorBoxData(x, i);
         tp = hypre_StructVectorBoxData(t, i);

         hypre_ForBoxI(j, compute_box_a)
         {
            compute_box = hypre_BoxArrayBox(compute_box_a, j);

            start = hypre_BoxIMin(compute_box);
            hypre_BoxGetStrideSize(compute_box, stride, loop_size);
            hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               xp[xi] = weight*tp[ti] + weightc*xp[xi];
            }
            hypre_BoxLoop2End(xi, ti);
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Special vector operation for use in hypre_PointRelax -
 * vector copy on specified pointsets.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_relax_copy( void *relax_vdata, HYPRE_Int pointset,
                            hypre_StructVector *t, hypre_StructVector *x )
/* Sets x to t, x=t, but only in the specified pointset. */
{
   hypre_PointRelaxData *relax_data = relax_vdata;
   hypre_Index          *pointset_strides = (relax_data -> pointset_strides);
   hypre_ComputePkg    **compute_pkgs = (relax_data -> compute_pkgs);
   hypre_ComputePkg     *compute_pkg;

   hypre_IndexRef        stride;
   hypre_IndexRef        start;
   hypre_Index           loop_size;

   double               *xp, *tp;
   HYPRE_Int             compute_i, i, j, xi, ti;

   hypre_BoxArrayArray  *compute_box_aa;
   hypre_BoxArray       *compute_box_a;
   hypre_Box            *compute_box;
   hypre_Box            *x_data_box;
   hypre_Box            *t_data_box;

   compute_pkg = compute_pkgs[pointset];
   stride = pointset_strides[pointset];

   /* visit both the independent and the dependent boxes of the pointset */
   for (compute_i = 0; compute_i < 2; compute_i++)
   {
      switch(compute_i)
      {
         case 0:
         {
            compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
         }
         break;

         case 1:
         {
            compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
         }
         break;
      }

      hypre_ForBoxArrayI(i, compute_box_aa)
      {
         compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

         x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
         t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i);

         xp = hypre_StructVectorBoxData(x, i);
         tp = hypre_StructVectorBoxData(t, i);

         hypre_ForBoxI(j, compute_box_a)
         {
            compute_box = hypre_BoxArrayBox(compute_box_a, j);

            start = hypre_BoxIMin(compute_box);
            hypre_BoxGetStrideSize(compute_box, stride, loop_size);
            hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               xp[xi] = tp[ti];
            }
            hypre_BoxLoop2End(xi, ti);
         }
      }
   }

   return hypre_error_flag;
}
LAGraph_cc_fastsv3.c
/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government. (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.
*/

/**
 * Code is based on the algorithm described in the following paper
 * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
 * Algorithm with Fast Convergence (SIAM PP20)
 *
 * Modified by Tim Davis, Texas A&M University
 **/

// The input matrix A must be symmetric.  Self-edges (diagonal entries) are
// OK, and are ignored.  The values and type of A are ignored; just its
// pattern is accessed.
#include "LAGraph.h"

//------------------------------------------------------------------------------
// atomic_min_uint64: compute (*p) = min (*p, value), via atomic update
//------------------------------------------------------------------------------

// Atomically folds `value` into the minimum held at (*p) with a
// compare-and-swap loop.  Returns early (without a CAS) when the stored
// value is already no larger than `value`.  Retained for the alternative
// parallel reduction inside Reduce_assign.  The local is named new_min
// (not `new`) so the file also compiles as C++.

static inline void atomic_min_uint64
(
    uint64_t *p,            // input/output: running minimum
    uint64_t value          // input: candidate minimum
)
{
    uint64_t old, new_min ;
    do
    {
        // get the old value at (*p)
        old = (*p) ;
        if (old <= value)
        {
            // already the minimum; no store required
            return ;
        }
        new_min = value ;
    }
    while (!__sync_bool_compare_and_swap (p, old, new_min)) ;
}

//------------------------------------------------------------------------------
// Reduce_assign: w (index) = min (w (index), s)
//------------------------------------------------------------------------------

// For k = 0..n-1:  w [index [k]] = min (w [index [k]], s [k]).
// Duplicates in `index` are combined with MIN, which differs from how
// GrB_assign works (GrB_assign would keep just one of the duplicates).
// Implemented by exporting both dense vectors, reducing directly into the
// raw value arrays, and importing them back.  I, mem, and nthreads are kept
// in the signature for an alternative (currently disabled) parallel
// implementation; the active path does not use them.

#define LAGRAPH_FREE_ALL

static GrB_Info Reduce_assign
(
    GrB_Vector *w_handle,   // vector of size n, all entries present
    GrB_Vector *s_handle,   // vector of size n, all entries present
    GrB_Index *index,       // array of size n
    GrB_Index n,
    GrB_Index *I,           // size n, containing [0, 1, 2, ..., n-1] (unused)
    GrB_Index *mem,         // workspace of size 2n (unused)
    int nthreads            // thread count (unused in the active path)
)
{
    GrB_Type w_type, s_type ;
    GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ;
    uint64_t *w_x, *s_x ;

    (void) I ; (void) mem ; (void) nthreads ;   // used only by the disabled variant

    // unpack the vectors to get direct access to their value arrays
    LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i,
        (void **) &w_x, NULL) ;
    LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i,
        (void **) &s_x, NULL) ;

    // sequential scatter-min
    for (GrB_Index k = 0 ; k < n ; k++)
    {
        uint64_t i = index [k] ;
        w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ;
    }

    // hand the arrays back to GraphBLAS
    LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i,
        (void **) &w_x, NULL) ;
    LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i,
        (void **) &s_x, NULL) ;

    return GrB_SUCCESS;
}

#undef  LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL            \
{                                   \
    LAGRAPH_FREE (I);               \
    LAGRAPH_FREE (V);               \
    LAGRAPH_FREE (mem);             \
    LAGr_free (&f) ;                \
    LAGr_free (&gp);                \
    LAGr_free (&mngp);              \
    LAGr_free (&gp_new);            \
    LAGr_free (&mod);               \
    if (sanitize) LAGr_free (&S);   \
}

//------------------------------------------------------------------------------
// LAGraph_cc_fastsv3
//------------------------------------------------------------------------------

// Computes the connected components of A (FastSV, Zhang/Azad/Hu, SIAM PP20).
// On success, *result is a vector of size n where result[i] is the smallest
// vertex id in i's component.  A must be symmetric unless sanitize is true,
// in which case A|A' is formed first.  Only the pattern of A is used.
//
// FIX: the three LAGraph_malloc results were previously dereferenced without
// a NULL check; on allocation failure this now frees all workspace and
// returns GrB_OUT_OF_MEMORY instead of crashing.

GrB_Info LAGraph_cc_fastsv3
(
    GrB_Vector *result,     // output: array of component identifiers
    GrB_Matrix A,           // input matrix
    bool sanitize           // if true, ensure A is symmetric
)
{
    GrB_Info info;
    GrB_Index n, *mem = NULL, *I = NULL, *V = NULL ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
    GrB_Matrix S = NULL ;

    LAGr_Matrix_nrows (&n, A) ;
    if (sanitize)
    {
        // S = A | A'
        LAGr_Matrix_new (&S, GrB_BOOL, n, n) ;
        LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ;
    }
    else
    {
        // Use the input as-is, and assume it is symmetric
        S = A ;
    }

    // determine # of threads to use for Reduce_assign
    int nthreads_max = LAGraph_get_nthreads ( ) ;
    int nthreads = n / (1024*1024) ;
    nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    // vectors
    LAGr_Vector_new (&f,      GrB_UINT64, n) ;
    LAGr_Vector_new (&gp_new, GrB_UINT64, n) ;
    LAGr_Vector_new (&mod,    GrB_BOOL,   n) ;

    // temporary arrays
    I   = LAGraph_malloc (n, sizeof (GrB_Index)) ;
    V   = LAGraph_malloc (n, sizeof (uint64_t)) ;
    mem = (GrB_Index *) LAGraph_malloc (2*n, sizeof (GrB_Index)) ;
    if (I == NULL || V == NULL || mem == NULL)
    {
        // out of memory: release whatever was allocated and fail cleanly
        LAGRAPH_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    // prepare vectors: f = gp = mngp = [0, 1, ..., n-1] (each vertex is
    // initially its own parent)
    for (GrB_Index i = 0; i < n; i++)
    {
        I[i] = V[i] = i;
    }
    LAGr_Vector_build (f, I, V, n, GrB_PLUS_UINT64) ;
    LAGr_Vector_dup (&gp, f) ;
    LAGr_Vector_dup (&mngp, f) ;

    // main computation
    bool diff = true ;
    while (diff)
    {
        // hooking & shortcutting: mngp = min (mngp, S*gp)
        LAGr_mxv (mngp, 0, GrB_MIN_UINT64, GxB_MIN_SECOND_UINT64, S, gp, 0);
        LAGRAPH_OK (Reduce_assign (&f, &mngp, V, n, I, mem, nthreads)) ;
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, mngp, 0) ;
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, gp, 0) ;

        // calculate grandparent: gp_new = f (f)
        LAGr_Vector_extractTuples (NULL, V, &n, f) ;
        LAGr_extract (gp_new, 0, 0, f, V, n, 0) ;

        // check termination: iterate until gp_new == gp everywhere
        LAGr_eWiseMult (mod, 0, 0, GrB_NE_UINT64, gp_new, gp, 0) ;
        LAGr_reduce (&diff, 0, GxB_LOR_BOOL_MONOID, mod, 0) ;

        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    // free workspace and return result
    *result = f ;
    f = NULL ;          // ownership transferred to the caller
    LAGRAPH_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
blake2sp.c
/* * Copyright (c) 2015-2018 Nexenta Systems, inc. * * This file is part of EdgeFS Project * (see https://github.com/Nexenta/edgefs). * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* BLAKE2 reference source code package - optimized C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 8 /* blake2sp_init_param defaults to setting the expecting output length from the digest_length parameter block field. In some cases, however, we do not want this, as the output length of these instances is given by inner_length instead. 
*/

/* Initialize a leaf blake2s state from an explicit parameter block.
   Unlike a plain blake2s_init_param, the expected output length recorded on
   the state is taken from P->inner_length rather than P->digest_length:
   leaf nodes always emit BLAKE2S_OUTBYTES of intermediate hash, regardless
   of the final digest length requested by the caller. */
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
  int err = blake2s_init_param(S, P);
  S->outlen = P->inner_length;
  return err;
}

/* Build the parameter block for leaf number `offset` of the 2-level tree
   (fanout = PARALLELISM_DEGREE, depth 2) and initialize S from it.
   Returns the result of the underlying init (negative on failure). */
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
  blake2s_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;     /* which leaf this state is */
  P->xof_length = 0;
  P->node_depth = 0;           /* leaves live at depth 0 */
  P->inner_length = BLAKE2S_OUTBYTES;
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2sp_init_leaf_param( S, P );
}

/* Build the parameter block for the root node (depth 1, offset 0) of the
   tree and initialize S from it.  The root combines the leaf digests. */
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
  blake2s_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->xof_length = 0;
  P->node_depth = 1;           /* root sits above the leaves */
  P->inner_length = BLAKE2S_OUTBYTES;
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2s_init_param( S, P );
}

/* Initialize an unkeyed blake2sp state producing `outlen` bytes
   (1..BLAKE2S_OUTBYTES).  Returns 0 on success, -1 on bad arguments or
   failed sub-initialization. */
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* Mark the nodes that finish each level of the tree. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

/* Initialize a keyed blake2sp state.  The key (1..BLAKE2S_KEYBYTES bytes)
   is fed to every leaf as a zero-padded first block and then wiped from
   the stack.  Returns 0 on success, -1 on bad arguments. */
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    /* Every leaf absorbs the key as a full zero-padded block. */
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes of input.  Input is striped across the leaves in
   BLAKE2S_BLOCKBYTES-sized blocks: leaf i consumes blocks i, i+P, i+2P, ...
   (P = PARALLELISM_DEGREE).  A partial stripe is buffered in S->buf until
   enough data arrives or blake2sp_final is called.  With OpenMP the leaves
   are updated by one thread each; otherwise sequentially.  Returns 0. */
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;

  /* Complete and flush a previously buffered partial stripe first. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();   /* one leaf per thread */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;

    /* Consume only whole stripes here; the tail is buffered below. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
  }

  /* Buffer whatever did not fill a complete stripe. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Finalize: flush the buffered partial stripe to the leaves, finalize each
   leaf, feed the leaf digests into the root, and write S->outlen bytes of
   digest to `out`.  Returns -1 if out is NULL or outlen is too small. */
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  size_t i;

  if(out == NULL || outlen < S->outlen) {
    return -1;
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    /* Leaf i owns block i of the buffered partial stripe (if present). */
    if( S->buflen > i * BLAKE2S_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;

      if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;

      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
    }

    blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  /* Root node hashes the concatenation of the leaf digests. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );

  return blake2s_final( S->R, out, S->outlen );
}

/* One-shot blake2sp: hash `inlen` bytes of `in` (optionally keyed) into
   `outlen` bytes at `out`.  Equivalent to init(_key)/update/final but each
   leaf also consumes its own trailing partial block inside the (optionally
   OpenMP-parallel) region.  Returns 0 on success, -1 on bad parameters. */
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  blake2s_state S[PARALLELISM_DEGREE][1];
  blake2s_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0) return -1;

  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;

  if( keylen > BLAKE2S_KEYBYTES ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;

    /* Whole stripes first... */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }

    /* ...then this leaf's share of the final partial stripe. */
    if( inlen__ > i * BLAKE2S_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
      const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[i], in__, len );
    }

    blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );

  return blake2s_final( FS, out, outlen );
}

#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self test: checks both the one-shot and the streaming API
   against the keyed KAT vectors for every message length and, for the
   streaming API, every chunk size below one block. */
int main( void )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );

    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      goto fail;
    }
  }

  /* Test streaming API */
  for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2S_OUTBYTES];
      blake2sp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;

      if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
        goto fail;
      }

      /* Feed the message in `step`-byte chunks. */
      while (mlen >= step) {
        if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
        goto fail;
      }

      if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
        goto fail;
      }
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
ZMQComm.h
/*! @brief Flag for checking if this header has already been included. */
#ifndef YGGZMQCOMM_H_
#define YGGZMQCOMM_H_

#include <CommBase.h>
#include "../datatypes/datatypes.h"

#ifdef ZMQINSTALLED
#include <czmq.h>
#endif

#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif

#ifdef ZMQINSTALLED

// Process-wide state for port allocation and messaging defaults.
static unsigned _zmq_rand_seeded = 0;   // srand() called for inproc/ipc keys
static unsigned _last_port_set = 0;     // _last_port initialized from env
static int _last_port = 49152;          // last TCP port handed out
/* static double _wait_send_t = 0; // 0.0001; */
static char _reply_msg[100] = "YGG_REPLY";   // payload used to confirm receipt
static char _purge_msg[100] = "YGG_PURGE";   // payload used to reset counters
static int _zmq_sleeptime = 10000;           // linger time (ms) used on EOF
#ifdef _OPENMP
#pragma omp threadprivate(_reply_msg, _purge_msg, _zmq_sleeptime)
#endif
// Shared ZMQ context; created once on the main thread (see ygg_zsys_init).
static void *ygg_s_process_ctx = NULL;

// Mirror of czmq's private zsock_t layout so the OMP-aware replacements
// below can poke at the same fields czmq does.
typedef struct ygg_zsock_t {
    uint32_t tag;               // Object tag for runtime detection
    void *handle;               // The libzmq socket handle
    char *endpoint;             // Last bound endpoint, if any
    char *cache;                // Holds last zsock_brecv strings
    int type;                   // Socket type
    size_t cache_size;          // Current size of cache
    uint32_t routing_id;        // Routing ID for server sockets
} ygg_zsock_t;

/*!
  @brief Initialize zeromq.
  The context is created exactly once, on the main thread, inside an OMP
  critical section; worker threads that race here before initialization
  get an error instead of a second context.
  @returns A zeromq context (NULL on failure).
 */
#ifdef _OPENMP
static inline void* ygg_zsys_init() {
#pragma omp critical (zmq)
  {
    if (!(ygg_s_process_ctx)) {
      if (get_thread_id() == 0) {
	ygglog_debug("ygg_zsys_init: Creating ZMQ context.");
	ygg_s_process_ctx = zsys_init();
	if (!(ygg_s_process_ctx)) {
	  ygglog_error("ygg_zsys_init: ZMQ context is NULL.");
	}
      } else {
	ygglog_error("ygg_zsys_init: Can only initialize the "
		     "zeromq context on the main thread. Call ygg_init "
		     "before the threaded portion of your model.");
      }
    }
  }
  return ygg_s_process_ctx;
};
#else
#define ygg_zsys_init zsys_init
#endif

/*! @brief Shutdown zeromq (serialized against other context use). */
#ifdef _OPENMP
static void ygg_zsys_shutdown() {
#pragma omp critical (zmq)
  {
    ygg_s_process_ctx = NULL;
    zsys_shutdown();
  }
};
#else
#define ygg_zsys_shutdown zsys_shutdown
#endif

/*!
  @brief Destroy a socket in thread safe way.
  Recreation of czmq's zsock_destroy that closes the raw libzmq handle and
  frees the wrapper fields directly instead of going through czmq.
  @param[in] self_p zsock_t** Pointer to a CZMQ socket wrapper struct.
 */
#ifdef _OPENMP
static inline void ygg_zsock_destroy(zsock_t **self_p) {
  // Recreation of czmq zsock_destroy that is OMP aware
  /* assert(self_p); */
  if (*self_p) {
    ygg_zsock_t *self = (ygg_zsock_t*)(*self_p);
    /* assert (zsock_is (*self_p)); */
    self->tag = 0xDeadBeef;   // invalidate the czmq object tag
    zmq_close (self->handle);
    freen (self->endpoint);
    freen (self->cache);
    freen (self);
    *self_p = NULL;
  }
};
#else
#define ygg_zsock_destroy zsock_destroy
#endif

/*!
  @brief Get a new socket, using the exising context.
  Recreation of czmq's zsock_new that allocates the wrapper itself and
  creates the raw libzmq socket inside an OMP critical section.
  @param[in] type int Socket type.
  @returns zsock_t* CZMQ socket wrapper struct (NULL on failure).
 */
#ifdef _OPENMP
static inline zsock_t * ygg_zsock_new(int type) {
  // Recreation of czmq zsock_new that is OMP aware
  ygg_zsock_t *self = (ygg_zsock_t *) zmalloc (sizeof (ygg_zsock_t));
  if (!(self)) {
    ygglog_error("ygg_zsock_new: Error allocating for new socket.");
    return NULL;
  }
  self->tag = 0xcafe0004;   // czmq's magic tag for a live zsock_t
  self->type = type;
  void* ctx = ygg_zsys_init();
  if (!(ctx)) {
    ygglog_error("ygg_zsock_new: Context is NULL.");
    freen(self);
    return NULL;
  }
#pragma omp critical (zmq)
  {
    self->handle = zmq_socket (ctx, type);
  }
  if (!(self->handle)) {
    ygglog_error("ygg_zsock_new: Error creating new socket.");
    freen(self);
    return NULL;
  }
  return (zsock_t*)(self);
};
#else
#define ygg_zsock_new zsock_new
#endif

// Convenience constructor: new socket with zero linger and immediate mode.
// NOTE(review): out is not checked for NULL before zsock_set_linger -- the
// setters presumably tolerate NULL; confirm against czmq behavior.
static inline zsock_t* create_zsock(int type) {
  zsock_t* out = ygg_zsock_new(type);
  zsock_set_linger(out, 0);
  zsock_set_immediate(out, 1);
  return out;
};

/*! @brief Struct to store info for reply (receipt-confirmation) sockets. */
typedef struct zmq_reply_t {
  int nsockets;        // Number of entries in sockets/addresses
  zsock_t **sockets;   // Reply sockets (REP on send side, REQ on recv side)
  char **addresses;    // Endpoint address per socket
  int n_msg;           // Messages seen
  int n_rep;           // Replies completed
} zmq_reply_t;

// Forward declarations
static inline
int zmq_comm_nmsg(const comm_t *x);
static inline
int zmq_comm_recv(const comm_t *x, char **data, const size_t len,
		  const int allow_realloc);

/*!
  @brief Free a reply structure.
  Destroys every socket, frees every address string, and resets the counts;
  the zmq_reply_t itself is NOT freed (callers free it separately).
  @param[in] x zmq_reply_t * Structure to free.
  @returns int 0 if successful, -1 otherwise.
 */
static inline
int free_zmq_reply(zmq_reply_t *x) {
  int i = 0;
  if (x != NULL) {
    if (x->sockets != NULL) {
      for (i = 0; i < x->nsockets; i++) {
	if (x->sockets[i] != NULL) {
	  ygg_zsock_destroy(&(x->sockets[i]));
	  x->sockets[i] = NULL;
	}
      }
      free(x->sockets);
    }
    if (x->addresses != NULL) {
      for (i = 0; i < x->nsockets; i++) {
	if (x->addresses[i] != NULL) {
	  free(x->addresses[i]);
	  x->addresses[i] = NULL;
	}
      }
      free(x->addresses);
    }
    x->nsockets = 0;
  }
  return 0;
}

/*!
  @brief Add empty reply structure information to comm.
  @param[in] comm comm_t * Comm to initialize reply for.
  @returns int 0 if successful, -1 otherwise.
 */
static inline
int init_zmq_reply(comm_t *comm) {
  zmq_reply_t *zrep = (zmq_reply_t*)malloc(sizeof(zmq_reply_t));
  if (zrep == NULL) {
    ygglog_error("init_zmq_reply(%s): Failed to malloc reply.", comm->name);
    return -1;
  }
  zrep->nsockets = 0;
  zrep->sockets = NULL;
  zrep->addresses = NULL;
  zrep->n_msg = 0;
  zrep->n_rep = 0;
  comm->reply = (void*)zrep;
  return 0;
};

/*!
  @brief Locate matching reply socket.
  @param[in] comm comm_t* Comm that should be checked for matching reply socket.
  @param[in] address char* Address that should be matched against.
  @returns int Index of matched socket, -1 if no match, -2 if error.
 */
static inline
int find_reply_socket(const comm_t *comm, const char *address) {
  int ret = -1;
  // Get reply
  zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply);
  if (zrep == NULL) {
    ygglog_error("find_reply_socket(%s): Reply structure not initialized.",
		 comm->name);
    return -2;
  }
  // Linear scan over the registered reply addresses.
  int i = 0;
  for (i = 0; i < zrep->nsockets; i++) {
    if (strcmp(zrep->addresses[i], address) == 0) {
      ret = i;
      break;
    }
  }
  return ret;
};

/*!
  @brief Request confirmation from receiving socket.
  @param[in] comm comm_t* Comm structure to do reply for.
  @returns int 0 if successful, -2 on EOF, -1 otherwise.
*/ static inline int do_reply_send(const comm_t *comm) { // Get reply zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply); if (zrep == NULL) { ygglog_error("do_reply_send(%s): Reply structure not initialized.", comm->name); return -1; } zrep->n_msg++; zsock_t *s = (zsock_t*)(zrep->sockets[0]); if (s == NULL) { ygglog_error("do_reply_send(%s): Socket is NULL.", comm->name); return -1; } // Poll ygglog_debug("do_reply_send(%s): address=%s, begin", comm->name, zrep->addresses[0]); #if defined(__cplusplus) && defined(_WIN32) // TODO: There seems to be an error in the poller when using it in C++ #else zpoller_t *poller = zpoller_new(s, NULL); if (!(poller)) { ygglog_error("do_reply_send(%s): Could not create poller", comm->name); return -1; } assert(poller); ygglog_debug("do_reply_send(%s): waiting on poller...", comm->name); void *p = zpoller_wait(poller, -1); //void *p = zpoller_wait(poller, 1000); ygglog_debug("do_reply_send(%s): poller returned", comm->name); if (p == NULL) { if (zpoller_terminated(poller)) { ygglog_error("do_reply_send(%s): Poller interrupted", comm->name); } else if (zpoller_expired(poller)) { ygglog_error("do_reply_send(%s): Poller expired", comm->name); } else { ygglog_error("do_reply_send(%s): Poller failed", comm->name); } zpoller_destroy(&poller); return -1; } zpoller_destroy(&poller); #endif // Receive zframe_t *msg = zframe_recv(s); if (msg == NULL) { ygglog_error("do_reply_send(%s): did not receive", comm->name); return -1; } char *msg_data = (char*)zframe_data(msg); // Check for EOF int is_purge = 0; if (strcmp(msg_data, YGG_MSG_EOF) == 0) { ygglog_debug("do_reply_send(%s): EOF received", comm->name); zrep->n_msg = 0; zrep->n_rep = 0; return -2; } else if (strcmp(msg_data, _purge_msg) == 0) { is_purge = 1; } // Send // zsock_set_linger(s, _zmq_sleeptime); int ret = zframe_send(&msg, s, 0); // Check for purge or EOF if (ret < 0) { ygglog_error("do_reply_send(%s): Error sending reply frame.", comm->name); zframe_destroy(&msg); } else { if 
(is_purge == 1) { ygglog_debug("do_reply_send(%s): PURGE received", comm->name); zrep->n_msg = 0; zrep->n_rep = 0; ret = do_reply_send(comm); } else { zrep->n_rep++; } } ygglog_debug("do_reply_send(%s): address=%s, end", comm->name, zrep->addresses[0]); #if defined(__cplusplus) && defined(_WIN32) // TODO: There seems to be an error in the poller when using it in C++ #else if (ret >= 0) { poller = zpoller_new(s, NULL); if (!(poller)) { ygglog_error("do_reply_send(%s): Could not create poller", comm->name); return -1; } assert(poller); ygglog_debug("do_reply_send(%s): waiting on poller...", comm->name); p = zpoller_wait(poller, 10); ygglog_debug("do_reply_send(%s): poller returned", comm->name); zpoller_destroy(&poller); } #endif return ret; }; /*! @brief Send confirmation to sending socket. @param[in] comm comm_t* Comm structure to do reply for. @param[in] isock int Index of socket that reply should be done for. @param[in] msg char* Mesage to send/recv. @returns int 0 if successfule, -1 otherwise. 
*/ static inline int do_reply_recv(const comm_t *comm, const int isock, const char *msg) { // Get reply zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply); if (zrep == NULL) { ygglog_error("do_reply_recv(%s): Reply structure not initialized.", comm->name); return -1; } zsock_t *s = (zsock_t*)(zrep->sockets[isock]); if (s == NULL) { ygglog_error("do_reply_recv(%s): Socket is NULL.", comm->name); return -1; } ygglog_debug("do_reply_recv(%s): address=%s, begin", comm->name, zrep->addresses[isock]); zframe_t *msg_send = zframe_new(msg, strlen(msg)); if (msg_send == NULL) { ygglog_error("do_reply_recv(%s): Error creating frame.", comm->name); return -1; } // Send int ret = zframe_send(&msg_send, s, 0); if (ret < 0) { ygglog_error("do_reply_recv(%s): Error sending confirmation.", comm->name); zframe_destroy(&msg_send); return -1; } if (strcmp(msg, YGG_MSG_EOF) == 0) { ygglog_info("do_reply_recv(%s): EOF confirmation.", comm->name); zrep->n_msg = 0; zrep->n_rep = 0; zsock_set_linger(s, _zmq_sleeptime); return -2; } // Poll to prevent block ygglog_debug("do_reply_recv(%s): address=%s, polling for reply", comm->name, zrep->addresses[isock]); #if defined(__cplusplus) && defined(_WIN32) // TODO: There seems to be an error in the poller when using it in C++ #else zpoller_t *poller = zpoller_new(s, NULL); if (!(poller)) { ygglog_error("do_reply_send(%s): Could not create poller", comm->name); return -1; } assert(poller); ygglog_debug("do_reply_recv(%s): waiting on poller...", comm->name); void *p = zpoller_wait(poller, 1000); ygglog_debug("do_reply_recv(%s): poller returned", comm->name); if (p == NULL) { if (zpoller_terminated(poller)) { ygglog_error("do_reply_recv(%s): Poller interrupted", comm->name); } else if (zpoller_expired(poller)) { ygglog_error("do_reply_recv(%s): Poller expired", comm->name); } else { ygglog_error("do_reply_recv(%s): Poller failed", comm->name); } zpoller_destroy(&poller); return -1; } zpoller_destroy(&poller); #endif // Receive zframe_t *msg_recv = 
zframe_recv(s); if (msg_recv == NULL) { ygglog_error("do_reply_recv(%s): did not receive", comm->name); return -1; } zframe_destroy(&msg_recv); zrep->n_rep++; ygglog_debug("do_reply_recv(%s): address=%s, end", comm->name, zrep->addresses[isock]); return 0; }; /*! @brief Add reply socket information to a send comm. @param[in] comm comm_t* Comm that confirmation is for. @returns char* Reply socket address. */ static inline char *set_reply_send(const comm_t *comm) { char *out = NULL; // Get reply zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply); if (zrep == NULL) { ygglog_error("set_reply_send(%s): Reply structure not initialized.", comm->name); return out; } // Create socket if (zrep->nsockets == 0) { zrep->sockets = (zsock_t**)malloc(sizeof(zsock_t*)); if (zrep->sockets == NULL) { ygglog_error("set_reply_send(%s): Error mallocing sockets.", comm->name); return out; } zrep->nsockets = 1; zrep->sockets[0] = create_zsock(ZMQ_REP); if (zrep->sockets[0] == NULL) { ygglog_error("set_reply_send(%s): Could not initialize empty socket.", comm->name); return out; } char protocol[50] = "tcp"; char host[50] = "localhost"; if (strcmp(host, "localhost") == 0) strncpy(host, "127.0.0.1", 50); char address[100]; int port = -1; #ifdef _OPENMP #pragma omp critical (zmqport) { #endif if (_last_port_set == 0) { ygglog_debug("model_index = %s", getenv("YGG_MODEL_INDEX")); _last_port = 49152 + 1000 * atoi(getenv("YGG_MODEL_INDEX")); _last_port_set = 1; ygglog_debug("_last_port = %d", _last_port); } sprintf(address, "%s://%s:*[%d-]", protocol, host, _last_port + 1); port = zsock_bind(zrep->sockets[0], "%s", address); if (port != -1) _last_port = port; #ifdef _OPENMP } #endif if (port == -1) { ygglog_error("set_reply_send(%s): Could not bind socket to address = %s", comm->name, address); return out; } sprintf(address, "%s://%s:%d", protocol, host, port); zrep->addresses = (char**)malloc(sizeof(char*)); zrep->addresses[0] = (char*)malloc((strlen(address) + 1)*sizeof(char)); 
strncpy(zrep->addresses[0], address, strlen(address) + 1); ygglog_debug("set_reply_send(%s): New reply socket: %s", comm->name, address); } out = zrep->addresses[0]; return out; }; /*! @brief Add reply socket information to a recv comm. @param[in] comm comm_t* Comm that confirmation is for. @returns int Index of the reply socket. */ static inline int set_reply_recv(const comm_t *comm, const char* address) { int out = -1; // Get reply zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply); if (zrep == NULL) { ygglog_error("set_reply_recv(%s): Reply structure not initialized.", comm->name); return out; } // Match address and create if it dosn't exist int isock = find_reply_socket(comm, address); if (isock < 0) { if (isock == -2) { ygglog_error("set_reply_recv(%s): Error locating socket.", comm->name); return out; } // Realloc arrays zrep->sockets = (zsock_t**)realloc(zrep->sockets, sizeof(zsock_t*)*(zrep->nsockets + 1)); if (zrep->sockets == NULL) { ygglog_error("set_reply_recv(%s): Error reallocing sockets.", comm->name); return out; } zrep->addresses = (char**)realloc(zrep->addresses, sizeof(char*)*(zrep->nsockets + 1)); if (zrep->addresses == NULL) { ygglog_error("set_reply_recv(%s): Error reallocing addresses.", comm->name); return out; } // Create new socket isock = zrep->nsockets; zrep->nsockets++; zrep->sockets[isock] = create_zsock(ZMQ_REQ); if (zrep->sockets[isock] == NULL) { ygglog_error("set_reply_recv(%s): Could not initialize empty socket.", comm->name); return out; } zrep->addresses[isock] = (char*)malloc(sizeof(char)*(strlen(address) + 1)); if (zrep->addresses[isock] == NULL) { ygglog_error("set_reply_recv(%s): Could not realloc new address.", comm->name); return out; } strncpy(zrep->addresses[isock], address, strlen(address) + 1); int ret = zsock_connect(zrep->sockets[isock], "%s", address); if (ret < 0) { ygglog_error("set_reply_recv(%s): Could not connect to socket.", comm->name); return out; } ygglog_debug("set_reply_recv(%s): New recv socket: %s", 
comm->name, address); } return isock; }; /*! @brief Add information about reply socket to outgoing message. @param[in] comm comm_t* Comm that confirmation is for. @param[in] data char* Message that reply info should be added to. @param[in] len int Length of the outgoing message. @returns char* Message with reply information added. */ static inline char* check_reply_send(const comm_t *comm, const char *data, const int len, int *new_len) { // Prevent C4100 warning on windows by referencing param #ifdef _WIN32 UNUSED(comm); #endif char *out = (char*)malloc(len + 1); memcpy(out, data, len + 1); new_len[0] = len; return out; }; /*! @brief Get reply information from message. @param[in] comm comm_* Comm structure for incoming message. @param[in, out] data char* Received message containing reply info that will be removed on return. @param[in] len size_t Length of received message. @returns int Length of message without the reply info. -1 if there is an error. */ static inline int check_reply_recv(const comm_t *comm, char *data, const size_t len) { int new_len = (int)len; int ret = 0; // Get reply zmq_reply_t *zrep = (zmq_reply_t*)(comm->reply); if (zrep == NULL) { ygglog_error("check_reply_recv(%s): Reply structure not initialized.", comm->name); return -1; } zrep->n_msg++; // Extract address comm_head_t head = parse_comm_header(data, len); if (!(head.flags & HEAD_FLAG_VALID)) { ygglog_error("check_reply_recv(%s): Invalid header.", comm->name); return -1; } char address[100]; size_t address_len; if ((comm->flags & COMM_FLAG_WORKER) && (zrep->nsockets == 1)) { address_len = strlen(zrep->addresses[0]); memcpy(address, zrep->addresses[0], address_len); } else if (strlen(head.zmq_reply) > 0) { address_len = strlen(head.zmq_reply); memcpy(address, head.zmq_reply, address_len); } else { ygglog_error("check_reply_recv(%s): Error parsing reply header in '%s'", comm->name, data); destroy_header(&head); return -1; } destroy_header(&head); address[address_len] = '\0'; // Match 
address and create if it dosn't exist int isock = set_reply_recv(comm, address); if (isock < 0) { ygglog_error("check_reply_recv(%s): Error setting reply socket."); return -1; } // Confirm message receipt ret = do_reply_recv(comm, isock, _reply_msg); if (ret < 0) { ygglog_error("check_reply_recv(%s): Error during reply.", comm->name); return -1; } return new_len; }; /*! @brief Create a new socket. @param[in] comm comm_t * Comm structure initialized with new_comm_base. @returns int -1 if the address could not be created. */ static inline int new_zmq_address(comm_t *comm) { // TODO: Get protocol/host from input char protocol[50] = "tcp"; char host[50] = "localhost"; char address[100]; comm->msgBufSize = 100; if (strcmp(host, "localhost") == 0) strncpy(host, "127.0.0.1", 50); if ((strcmp(protocol, "inproc") == 0) || (strcmp(protocol, "ipc") == 0)) { // TODO: small chance of reusing same number int key = 0; #ifdef _OPENMP #pragma omp critical (zmqport) { #endif if (!(_zmq_rand_seeded)) { srand(ptr2seed(comm)); _zmq_rand_seeded = 1; } #ifdef _OPENMP } #endif while (key == 0) key = rand(); if (strlen(comm->name) == 0) sprintf(comm->name, "tempnewZMQ-%d", key); sprintf(address, "%s://%s", protocol, comm->name); } else { #ifdef _OPENMP #pragma omp critical (zmqport) { #endif if (_last_port_set == 0) { ygglog_debug("model_index = %s", getenv("YGG_MODEL_INDEX")); _last_port = 49152 + 1000 * atoi(getenv("YGG_MODEL_INDEX")); _last_port_set = 1; ygglog_debug("_last_port = %d", _last_port); } sprintf(address, "%s://%s:*[%d-]", protocol, host, _last_port + 1); #ifdef _OPENMP } #endif /* strcat(address, ":!"); // For random port */ } // Bind zsock_t *s = NULL; if (comm->flags & COMM_FLAG_CLIENT_RESPONSE) { s = create_zsock(ZMQ_ROUTER); } else if (comm->flags & COMM_ALLOW_MULTIPLE_COMMS) { s = create_zsock(ZMQ_DEALER); } else { s = create_zsock(ZMQ_PAIR); } if (s == NULL) { ygglog_error("new_zmq_address: Could not initialize empty socket."); return -1; } int port = zsock_bind(s, 
"%s", address); if (port == -1) { ygglog_error("new_zmq_address: Could not bind socket to address = %s", address); return -1; } // Add port to address #ifdef _OPENMP #pragma omp critical (zmqport) { #endif if ((strcmp(protocol, "inproc") != 0) && (strcmp(protocol, "ipc") != 0)) { _last_port = port; sprintf(address, "%s://%s:%d", protocol, host, port); } #ifdef _OPENMP } #endif strncpy(comm->address, address, COMM_ADDRESS_SIZE); ygglog_debug("new_zmq_address: Bound socket to %s", comm->address); if (strlen(comm->name) == 0) sprintf(comm->name, "tempnewZMQ-%d", port); comm->handle = (void*)s; // Init reply int ret = init_zmq_reply(comm); return ret; }; /*! @brief Initialize a ZeroMQ communicator. @param[in] comm comm_t * Comm structure initialized with init_comm_base. @returns int -1 if the comm could not be initialized. */ static inline int init_zmq_comm(comm_t *comm) { int ret = -1; if (!(comm->flags & COMM_FLAG_VALID)) return ret; comm->msgBufSize = 100; zsock_t *s; if (comm->flags & (COMM_FLAG_SERVER | COMM_ALLOW_MULTIPLE_COMMS)) { s = create_zsock(ZMQ_DEALER); } else { s = create_zsock(ZMQ_PAIR); } if (s == NULL) { ygglog_error("init_zmq_address: Could not initialize empty socket."); return -1; } ret = zsock_connect(s, "%s", comm->address); if (ret == -1) { ygglog_error("init_zmq_address: Could not connect socket to address = %s", comm->address); ygg_zsock_destroy(&s); return ret; } ygglog_debug("init_zmq_address: Connected socket to %s", comm->address); if (strlen(comm->name) == 0) sprintf(comm->name, "tempinitZMQ-%s", comm->address); // Asign to void pointer comm->handle = (void*)s; ret = init_zmq_reply(comm); comm->flags = comm->flags | COMM_ALWAYS_SEND_HEADER; return ret; }; /*! @brief Perform deallocation for ZMQ communicator. @param[in] x comm_t Pointer to communicator to deallocate. @returns int 1 if there is and error, 0 otherwise. 
*/
static inline
int free_zmq_comm(comm_t *x) {
  int ret = 0;
  if (x == NULL)
    return ret;
  // Drain remaining input (stopping early on a valid EOF message) so the
  // peer is not left blocked waiting on confirmations.
  if ((is_recv(x->direction)) && (x->flags & COMM_FLAG_VALID)) {
    if (_ygg_error_flag == 0) {
      size_t data_len = 100;
      char *data = (char*)malloc(data_len);
      comm_head_t head;
      bool is_eof_flag = false;
      while (zmq_comm_nmsg(x) > 0) {
	ret = zmq_comm_recv(x, &data, data_len, 1);
	if (ret >= 0) {
	  head = parse_comm_header(data, ret);
	  if (strncmp(YGG_MSG_EOF, data + head.bodybeg,
		      strlen(YGG_MSG_EOF)) == 0)
	    is_eof_flag = true;
	  destroy_header(&head);
	  // NOTE(review): head.flags is read after destroy_header(&head);
	  // this presumes destroy_header leaves the flags field of the
	  // stack struct intact -- TODO confirm against destroy_header.
	  if ((head.flags & HEAD_FLAG_VALID) && is_eof_flag) {
	    x->const_flags[0] = x->const_flags[0] | COMM_EOF_RECV;
	    break;
	  }
	}
      }
      free(data);
    }
  }
  // Free reply
  if (x->reply != NULL) {
    zmq_reply_t *zrep = (zmq_reply_t*)(x->reply);
    // Free reply
    ret = free_zmq_reply(zrep);
    free(x->reply);
    x->reply = NULL;
  }
  // Destroy the main socket handle.
  if (x->handle != NULL) {
    zsock_t *s = (zsock_t*)(x->handle);
    if (s != NULL) {
      ygglog_debug("Destroying socket: %s", x->address);
      ygg_zsock_destroy(&s);
    }
    x->handle = NULL;
  }
  ygglog_debug("free_zmq_comm: finished");
  return ret;
};

/*!
  @brief Get number of messages in the comm.
  For a receive comm this polls the socket (1 ms) and reports 0 or 1; for a
  send comm it reports the number of sent messages not yet confirmed
  (n_msg - n_rep).
  @param[in] comm_t Communicator to check.
  @returns int Number of messages. -1 indicates an error.
 */
static inline
int zmq_comm_nmsg(const comm_t *x) {
  int out = 0;
  if (is_recv(x->direction)) {
    if (x->handle != NULL) {
      zsock_t *s = (zsock_t*)(x->handle);
      zpoller_t *poller = zpoller_new(s, NULL);
      if (poller == NULL) {
	ygglog_error("zmq_comm_nmsg: Could not create poller");
	return -1;
      }
      void *p = zpoller_wait(poller, 1);
      if (p == NULL) {
	if (zpoller_terminated(poller)) {
	  ygglog_error("zmq_comm_nmsg: Poller interrupted");
	  out = -1;
	} else {
	  out = 0;   // expired: nothing waiting
	}
      } else {
	out = 1;     // at least one message is readable
      }
      zpoller_destroy(&poller);
    }
  } else {
    /* if (x->last_send[0] != 0) { */
    /*   time_t now; */
    /*   time(&now); */
    /*   double elapsed = difftime(now, x->last_send[0]); */
    /*   if (elapsed > _wait_send_t) */
    /* 	out = 0; */
    /*   else */
    /* 	out = 1; */
    /* } */
    zmq_reply_t *zrep = (zmq_reply_t*)(x->reply);
    if (zrep != NULL) {
      ygglog_debug("zmq_comm_nmsg(%s): nmsg = %d, nrep = %d", x->name,
		   zrep->n_msg, zrep->n_rep);
      out = zrep->n_msg - zrep->n_rep;
    }
  }
  return out;
};

/*!
  @brief Send a message to the comm.
  Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the
  message is larger, it will not be sent.
  @param[in] x comm_t* structure that comm should be sent to.
  @param[in] data character pointer to message that should be sent.
  @param[in] len size_t length of message to be sent.
  @returns int 0 if send successful, -1 if send unsuccessful.
*/ static inline int zmq_comm_send(const comm_t *x, const char *data, const size_t len) { ygglog_debug("zmq_comm_send(%s): %d bytes", x->name, len); if (comm_base_send(x, data, len) == -1) return -1; zsock_t *s = (zsock_t*)(x->handle); if (s == NULL) { ygglog_error("zmq_comm_send(%s): socket handle is NULL", x->name); return -1; } int new_len = 0; char *new_data = check_reply_send(x, data, (int)len, &new_len); if (new_data == NULL) { ygglog_error("zmq_comm_send(%s): Adding reply address failed.", x->name); return -1; } zframe_t *f = zframe_new(new_data, new_len); int ret = -1; if (f == NULL) { ygglog_error("zmq_comm_send(%s): frame handle is NULL", x->name); } else { ret = zframe_send(&f, s, 0); if (ret < 0) { ygglog_error("zmq_comm_send(%s): Error in zframe_send", x->name); zframe_destroy(&f); } } // Get reply if (ret >= 0) { ret = do_reply_send(x); if (ret < 0) { if (ret == -2) { ygglog_error("zmq_comm_send(%s): EOF received", x->name); } else { ygglog_error("zmq_comm_send(%s): Error in do_reply_send", x->name); } } } ygglog_debug("zmq_comm_send(%s): returning %d", x->name, ret); free(new_data); return ret; }; static inline zframe_t * zmq_comm_recv_zframe(const comm_t* x) { ygglog_debug("zmq_comm_recv_zframe(%s)", x->name); zsock_t *s = (zsock_t*)(x->handle); if (s == NULL) { ygglog_error("zmq_comm_recv_zframe(%s): socket handle is NULL", x->name); return NULL; } clock_t start = clock(); while ((((double)(clock() - start))/CLOCKS_PER_SEC) < 180) { int nmsg = zmq_comm_nmsg(x); if (nmsg < 0) return NULL; else if (nmsg > 0) break; else { ygglog_debug("zmq_comm_recv_zframe(%s): no messages, sleep %d", x->name, YGG_SLEEP_TIME); usleep(YGG_SLEEP_TIME); } } ygglog_debug("zmq_comm_recv_zframe(%s): receiving", x->name); zframe_t *out = NULL; if (x->flags & COMM_FLAG_CLIENT_RESPONSE) { out = zframe_recv(s); if (out == NULL) { ygglog_debug("zmq_comm_recv_zframe(%s): did not receive identity", x->name); return NULL; } zframe_destroy(&out); out = NULL; } out = zframe_recv(s); 
if (out == NULL) { ygglog_debug("zmq_comm_recv_zframe(%s): did not receive", x->name); return NULL; } return out; }; /*! @brief Receive a message from an input comm. Receive a message smaller than YGG_MSG_MAX bytes from an input comm. @param[in] x comm_t* structure that message should be sent to. @param[out] data char ** pointer to allocated buffer where the message should be saved. This should be a malloc'd buffer if allow_realloc is 1. @param[in] len const size_t length of the allocated message buffer in bytes. @param[in] allow_realloc const int If 1, the buffer will be realloced if it is not large enought. Otherwise an error will be returned. @returns int -1 if message could not be received. Length of the received message if message was received. */ static inline int zmq_comm_recv(const comm_t* x, char **data, const size_t len, const int allow_realloc) { int ret = -1; ygglog_debug("zmq_comm_recv(%s)", x->name); zsock_t *s = (zsock_t*)(x->handle); if (s == NULL) { ygglog_error("zmq_comm_recv(%s): socket handle is NULL", x->name); return ret; } zframe_t *out = zmq_comm_recv_zframe(x); if (out == NULL) { ygglog_debug("zmq_comm_recv(%s): did not receive", x->name); return ret; } // Check for server signon and respond while (strncmp((char*)zframe_data(out), "ZMQ_SERVER_SIGNING_ON::", 23) == 0) { ygglog_debug("zmq_comm_recv(%s): Received sign-on", x->name); char* client_address = (char*)zframe_data(out) + 23; // create a DEALER socket and connect to address zsock_t *client_socket = create_zsock(ZMQ_DEALER); if (client_socket == NULL) { ygglog_error("zmq_comm_recv(%s): Could not initalize the client side of the proxy socket to confirm signon", x->name); zframe_destroy(&out); return ret; } zsock_set_sndtimeo(client_socket, _zmq_sleeptime); zsock_set_immediate(client_socket, 1); zsock_set_linger(client_socket, _zmq_sleeptime); if (zsock_connect(client_socket, "%s", client_address) < 0) { ygglog_error("zmq_comm_recv(%s): Error when connecting to the client proxy socket to 
respond to signon: %s", x->name, client_address); zframe_destroy(&out); ygg_zsock_destroy(&client_socket); return ret; } zframe_t *response = zframe_new(zframe_data(out), zframe_size(out)); if (response == NULL) { ygglog_error("zmq_comm_recv(%s): Error creating response message frame.", x->name); zframe_destroy(&out); zframe_destroy(&response); ygg_zsock_destroy(&client_socket); return ret; } if (zframe_send(&response, client_socket, 0) < 0) { ygglog_error("zmq_comm_recv(%s): Error sending response message.", x->name); zframe_destroy(&out); zframe_destroy(&response); ygg_zsock_destroy(&client_socket); return ret; } zframe_destroy(&response); ygg_zsock_destroy(&client_socket); zframe_destroy(&out); out = zmq_comm_recv_zframe(x); if (out == NULL) { ygglog_debug("zmq_comm_recv(%s): did not receive", x->name); return ret; } } // Realloc and copy data size_t len_recv = zframe_size(out) + 1; // size_t len_recv = (size_t)ret + 1; if (len_recv > len) { if (allow_realloc) { ygglog_debug("zmq_comm_recv(%s): reallocating buffer from %d to %d bytes.", x->name, len, len_recv); (*data) = (char*)realloc(*data, len_recv); if (*data == NULL) { ygglog_error("zmq_comm_recv(%s): failed to realloc buffer.", x->name); zframe_destroy(&out); return -1; } } else { ygglog_error("zmq_comm_recv(%s): buffer (%d bytes) is not large enough for message (%d bytes)", x->name, len, len_recv); zframe_destroy(&out); return -((int)(len_recv - 1)); } } memcpy(*data, zframe_data(out), len_recv - 1); zframe_destroy(&out); (*data)[len_recv-1] = '\0'; ret = (int)len_recv - 1; /* if (strlen(*data) != ret) { ygglog_error("zmq_comm_recv(%s): Size of string (%d) doesn't match expected (%d)", x->name, strlen(*data), ret); return -1; } */ // Check reply ret = check_reply_recv(x, *data, ret); if (ret < 0) { ygglog_error("zmq_comm_recv(%s): failed to check for reply socket.", x->name); return ret; } ygglog_debug("zmq_comm_recv(%s): returning %d", x->name, ret); return ret; }; // Definitions in the case where ZMQ 
libraries not installed #else /*ZMQINSTALLED*/ /*! @brief Print error message about ZMQ library not being installed. */ static inline void ygg_zsys_shutdown() { ygglog_error("Compiler flag 'ZMQINSTALLED' not defined so ZMQ bindings are disabled."); }; /*! @brief Print error message about ZMQ library not being installed. */ static inline void* ygg_zsys_init() { ygglog_error("Compiler flag 'ZMQINSTALLED' not defined so ZMQ bindings are disabled."); return NULL; }; /*! @brief Print error message about ZMQ library not being installed. */ static inline void zmq_install_error() { ygglog_error("Compiler flag 'ZMQINSTALLED' not defined so ZMQ bindings are disabled."); }; /*! @brief Perform deallocation for ZMQ communicator. @param[in] x comm_t Pointer to communicator to deallocate. @returns int 1 if there is and error, 0 otherwise. */ static inline int free_zmq_comm(comm_t *x) { zmq_install_error(); return 1; }; /*! @brief Create a new socket. @param[in] comm comm_t * Comm structure initialized with new_comm_base. @returns int -1 if the address could not be created. */ static inline int new_zmq_address(comm_t *comm) { zmq_install_error(); return -1; }; /*! @brief Initialize a ZeroMQ communicator. @param[in] comm comm_t * Comm structure initialized with init_comm_base. @returns int -1 if the comm could not be initialized. */ static inline int init_zmq_comm(comm_t *comm) { zmq_install_error(); return -1; }; /*! @brief Get number of messages in the comm. @param[in] x comm_t* Communicator to check. @returns int Number of messages. -1 indicates an error. */ static inline int zmq_comm_nmsg(const comm_t* x) { zmq_install_error(); return -1; }; /*! @brief Send a message to the comm. Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the message is larger, it will not be sent. @param[in] x comm_t* structure that comm should be sent to. @param[in] data character pointer to message that should be sent. @param[in] len size_t length of message to be sent. 
@returns int 0 if send succesfull, -1 if send unsuccessful. */ static inline int zmq_comm_send(const comm_t* x, const char *data, const size_t len) { zmq_install_error(); return -1; }; /*! @brief Receive a message from an input comm. Receive a message smaller than YGG_MSG_MAX bytes from an input comm. @param[in] x comm_t* structure that message should be sent to. @param[out] data char ** pointer to allocated buffer where the message should be saved. This should be a malloc'd buffer if allow_realloc is 1. @param[in] len const size_t length of the allocated message buffer in bytes. @param[in] allow_realloc const int If 1, the buffer will be realloced if it is not large enought. Otherwise an error will be returned. @returns int -1 if message could not be received. Length of the received message if message was received. */ static inline int zmq_comm_recv(const comm_t* x, char **data, const size_t len, const int allow_realloc) { zmq_install_error(); return -1; }; /*! @brief Add reply socket information to a send comm. @param[in] comm comm_t* Comm that confirmation is for. @returns char* Reply socket address. */ static inline char *set_reply_send(const comm_t *comm) { zmq_install_error(); return NULL; }; /*! @brief Add reply socket information to a recv comm. @param[in] comm comm_t* Comm that confirmation is for. @param[in] address const char* Comm address. @returns int Index of the reply socket. */ static inline int set_reply_recv(const comm_t *comm, const char* address) { zmq_install_error(); return -1; }; #endif /*ZMQINSTALLED*/ #ifdef __cplusplus /* If this is a C++ compiler, end C linkage */ } #endif #endif /*YGGZMQCOMM_H_*/
zlook_ahead_update.c
/************************************************************************/ /*! @file * \brief Look-ahead update of the Schur complement. * * <pre> * -- Distributed SuperLU routine (version 4.0) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * */ #ifdef ISORT while (j < nub && iperm_u[j] <= k0 + num_look_aheads) #else while (j < nub && perm_u[2 * j] <= k0 + num_look_aheads) #endif { doublecomplex zero = {0.0, 0.0}; arrive_at_ublock (j, &iukp, &rukp, &jb, &ljb, &nsupc, iukp0, rukp0, usub, perm_u, xsup, grid); j++; jj0++; jj = iukp; lptr = lptr0; luptr = luptr0; while (usub[jj] == klst) ++jj; ldu = klst - usub[jj++]; ncols = 1; full = 1; for (; jj < iukp + nsupc; ++jj) { segsize = klst - usub[jj]; if (segsize) { ++ncols; if (segsize != ldu) full = 0; if (segsize > ldu) ldu = segsize; } } #if ( DEBUGlevel>=3 ) ++num_update; #endif if (0) { tempu = &uval[rukp]; } else { /* Copy block U(k,j) into tempU2d. */ #if ( DEBUGlevel>=3 ) printf ("(%d) full=%d,k=%d,jb=%d,ldu=%d,ncols=%d,nsupc=%d\n", iam, full, k, jb, ldu, ncols, nsupc); ++num_copy; #endif tempu = bigU; for (jj = iukp; jj < iukp + nsupc; ++jj) { segsize = klst - usub[jj]; if (segsize) { lead_zero = ldu - segsize; for (i = 0; i < lead_zero; ++i) tempu[i] = zero; tempu += lead_zero; for (i = 0; i < segsize; ++i) { tempu[i] = uval[rukp + i]; } rukp += segsize; tempu += segsize; } } tempu = bigU; rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */ } /* if full ... */ nbrow = lsub[1]; if (myrow == krow) nbrow = lsub[1] - lsub[3]; /* skip diagonal block for those rows */ // double ttx =SuperLU_timer_(); #ifdef _OPENMP #pragma omp parallel for \ private(lb,lptr,luptr,ib,tempv ) \ default(shared) schedule(dynamic) #endif for (lb = 0; lb < nlb; lb++) { int temp_nbrow; int_t lptr = lptr0; int_t luptr = luptr0; for (int i = 0; i < lb; ++i) { ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr + 1]; /* Number of full rows. */ lptr += LB_DESCRIPTOR; /* Skip descriptor. 
*/ lptr += temp_nbrow; luptr += temp_nbrow; } #ifdef _OPENMP int_t thread_id = omp_get_thread_num (); #else int_t thread_id = 0; #endif doublecomplex * tempv = bigV + ldt*ldt*thread_id; int *indirect_thread = indirect + ldt * thread_id; int *indirect2_thread = indirect2 + ldt*thread_id; ib = lsub[lptr]; /* Row block L(i,k). */ temp_nbrow = lsub[lptr + 1]; /* Number of full rows. */ assert (temp_nbrow <= nbrow); lptr += LB_DESCRIPTOR; /* Skip descriptor. */ /* calling gemm */ #if defined (USE_VENDOR_BLAS) zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, &lusup[luptr + (knsupc - ldu) * nsupr], &nsupr, tempu, &ldu, &beta, tempv, &temp_nbrow, 1, 1); #else zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, &lusup[luptr + (knsupc - ldu) * nsupr], &nsupr, tempu, &ldu, &beta, tempv, &temp_nbrow ); #endif /* Now scattering the output*/ if (ib < jb) { /* A(i,j) is in U. */ zscatter_u (ib, jb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, lsub, usub, tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid); } else { /* A(i,j) is in L. */ zscatter_l (ib, ljb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, usub, lsub, tempv, indirect_thread, indirect2_thread, Lrowind_bc_ptr, Lnzval_bc_ptr, grid); } } /* parallel for lb = 0, ... */ rukp += usub[iukp - 1]; /* Move to block U(k,j+1) */ iukp += nsupc; /* ==================================== * * == factorize and send if possible == * * ==================================== */ kk = jb; kcol = PCOL (kk, grid); #ifdef ISORT kk0 = iperm_u[j - 1]; #else kk0 = perm_u[2 * (j - 1)]; #endif look_id = kk0 % (1 + num_look_aheads); if (look_ahead[kk] == k0 && kcol == mycol) { /* current column is the last dependency */ look_id = kk0 % (1 + num_look_aheads); /* Factor diagonal and subdiagonal blocks and test for exact singularity. 
*/ factored[kk] = 0; /* double ttt1 = SuperLU_timer_(); */ #if ( VAMPIR>=1 ) VT_begin (5); #endif PZGSTRF2(options, kk0, kk, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); #if ( VAMPIR>=1 ) VT_end (5); #endif /* stat->time7 += SuperLU_timer_() - ttt1; */ /* Multicasts numeric values of L(:,kk) to process rows. */ send_req = send_reqs[look_id]; msgcnt = msgcnts[look_id]; lk = LBj (kk, grid); /* Local block number. */ lsub1 = Lrowind_bc_ptr[lk]; lusup1 = Lnzval_bc_ptr[lk]; if (lsub1) { msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR; msgcnt[1] = lsub1[1] * SuperSize (kk); } else { msgcnt[0] = 0; msgcnt[1] = 0; } scp = &grid->rscp; /* The scope of process row. */ for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { #if ( PROFlevel>=1 ) TIC (t1); #endif #if ( VAMPIR>=1 ) VT_begin (1); #endif MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, kk0) /* (4*kk0)%tag_ub */ , scp->comm, &send_req[pj]); MPI_Isend (lusup1, msgcnt[1], SuperLU_MPI_DOUBLE_COMPLEX, pj, SLU_MPI_TAG (1, kk0) /* (4*kk0+1)%tag_ub */ , scp->comm, &send_req[pj + Pc]); #if ( VAMPIR>=1 ) VT_end (1); #endif #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; msg_cnt += 2; msg_vol += msgcnt[0] * iword + msgcnt[1] * dword; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] -2- Send L(:,%4d): #lsub %4d, #lusup %4d to Pj %2d, tags %d:%d \n", iam, kk, msgcnt[0], msgcnt[1], pj, SLU_MPI_TAG(0,kk0), SLU_MPI_TAG(1,kk0)); #endif } /* end if ( ToSendR[lk][pj] != EMPTY ) */ } /* end for pj ... */ } /* end if( look_ahead[kk] == k0 && kcol == mycol ) */ } /* end while j < nub and perm_u[j] <k0+NUM_LOOK_AHEAD */
pt_to_pt_overlap.c
/***************************************************************************** * * * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 * * * * produced by * * * * Mark Bull, Jim Enright and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk * * * * * * Copyright 2012, The University of Edinburgh * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ /*-----------------------------------------------------------*/ /* Contains the point-to-point overlap mixed mode */ /* OpenMP/MPI benchmarks. */ /* This includes: -masteronly overlap */ /* -funnelled overlap */ /* -multiple overlap */ /*-----------------------------------------------------------*/ #include "pt_to_pt_overlap.h" /*-----------------------------------------------------------*/ /* overlap */ /* */ /* Driver subroutine for the overlap benchmark. */ /*-----------------------------------------------------------*/ int overlap(int benchmarkType){ int dataSizeIter; int sameNode; pingRank = PPRanks[0]; pongRank = PPRanks[1]; /* Check if pingRank and pongRank are on the same node */ sameNode = compareProcNames(pingRank,pongRank); /* Master process then does some reporting */ if (myMPIRank == 0){ /* print message saying if benchmark is inter or intra node */ printNodeReport(sameNode,pingRank,pongRank); /* then print report column headings. 
*/ printBenchHeader(); } /* initialise repsToDo to defaultReps at start of benchmark */ repsToDo = defaultReps; /* Loop over data sizes */ dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */ while (dataSizeIter <= maxDataSize){ /* set sizeofBuffer */ sizeofBuffer = dataSizeIter * numThreads; /* allocate space for the main data arrays */ allocateOverlapData(sizeofBuffer); /* warm-up for either masteronly, funnelled or multiple */ if (benchmarkType == MASTERONLY){ /* perform masteronly warm-up sweep */ masteronlyOverlap(warmUpIters, dataSizeIter); } else if (benchmarkType == FUNNELLED){ /* perform funnelled warm-up sweep */ funnelledOverlap(warmUpIters, dataSizeIter); } else if (benchmarkType == SERIALIZED){ /* perform serialized warm-up sweep */ serializedOverlap(warmUpIters, dataSizeIter); } else if (benchmarkType == MULTIPLE){ multipleOverlap(warmUpIters, dataSizeIter); } else if (benchmarkType == TASK){ taskOverlap(warmUpIters, dataSizeIter); } /* perform verification test for the overlap */ testOverlap(sizeofBuffer, dataSizeIter); /* Initialise benchmark */ benchComplete = FALSE; /* keep executing benchmark until target time is reached */ while (benchComplete != TRUE){ /* Start the timer...MPI_Barrier to synchronise */ MPI_Barrier(comm); startTime = MPI_Wtime(); if (benchmarkType == MASTERONLY){ /* execute for repsToDo repetitions */ masteronlyOverlap(repsToDo, dataSizeIter); } else if (benchmarkType == FUNNELLED){ funnelledOverlap(repsToDo, dataSizeIter); } else if (benchmarkType == SERIALIZED){ serializedOverlap(repsToDo, dataSizeIter); } else if (benchmarkType == MULTIPLE){ multipleOverlap(repsToDo, dataSizeIter); } else if (benchmarkType == TASK){ taskOverlap(repsToDo, dataSizeIter); } /* Stop the timer...MPI_Barrier to synchronise processes */ MPI_Barrier(comm); finishTime = MPI_Wtime(); totalTime = finishTime - startTime; /* Call repTimeCheck function to test if target time is reached */ if (myMPIRank==0){ benchComplete = 
repTimeCheck(totalTime, repsToDo); } /* Ensure all procs have the same value of benchComplete */ /* and repsToDo */ MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm); MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm); } /* Master process sets benchmark results */ if (myMPIRank == 0){ setReportParams(dataSizeIter, repsToDo, totalTime); printReport(); } /* Free the allocated space for the main data arrays */ freeOverlapData(); /* Update dataSize before the next iteration */ dataSizeIter = dataSizeIter * 2; /* double data size */ } return 0; } /*-----------------------------------------------------------*/ /* fwq */ /* */ /* Fixed Work Quantum microbenchmark */ /*-----------------------------------------------------------*/ void fwq(int work, int iters) { register unsigned long done; register long long count; register long long wl = -(1 << work); count = wl; for (count=wl; count<0;) { register int k; for (k=0;k<32;k++) count++; for (k=0;k<31;k++) count--; } for (done=0; done<iters; done++) { count = wl; for (count=wl; count<0;) { register int k; for (k=0;k<32;k++) count++; for (k=0;k<31;k++) count--; } } } /*-----------------------------------------------------------*/ /* masteronlyOverlap */ /* */ /* One MPI process sends single fixed length message to */ /* another MPI process. */ /* This other process then sends it back to the first */ /* process. */ /*-----------------------------------------------------------*/ int masteronlyOverlap(int totalReps, int dataSize){ int repIter, i; MPI_Request req; MPI_Status stat; for (repIter = 0; repIter < totalReps; repIter++){ /* All threads under MPI process with rank = pingRank * write to their part of the pingBuf array using a * parallel for directive. 
*/ if (myMPIRank == pingRank) { #pragma omp parallel for default(none) \ private(i) \ shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \ schedule(static,dataSize) for(i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Ping process sends buffer to MPI process with rank equal to * pongRank. */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, pongRank, TAG, comm, &req); #pragma omp parallel for default(none) \ private(i,numThreads) \ schedule(static) for (i=0; i<numThreads; i++) { fwq(100000,100); } /* Finish the Send operation with an MPI_Wait */ MPI_Wait(&req, &stat); /* Process then waits for a message from pong process and * each thread reads its part of received buffer. */ MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongRank, \ TAG, comm, &stat); #pragma omp parallel for default(none) \ private(i) \ shared(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer) \ schedule(static,dataSize) for(i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pongRecvBuf[i]; } } else if (myMPIRank == pongRank){ /* pongRank receives the message from the ping process */ MPI_Irecv(pingRecvBuf, sizeofBuffer, MPI_INT, pingRank, \ TAG, comm, &req); #pragma omp parallel for default(none) \ private(i,numThreads) \ schedule(static) for (i=0; i<numThreads; i++) { fwq(100000,100); } /* Finish the Receive operation with an MPI_Wait */ MPI_Wait(&req, &stat); /* each thread under the pongRank MPI process now copies * its part of the received buffer to pongSendBuf. */ #pragma omp parallel for default(none) \ private(i) \ shared(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer) \ schedule(static,dataSize) for(i=0; i< sizeofBuffer; i++){ pongSendBuf[i] = pingRecvBuf[i]; } /* pongRank process now sends pongSendBuf to ping process. 
*/ MPI_Send(pongSendBuf, sizeofBuffer, MPI_INTEGER, pingRank, \ TAG, comm); } } return 0; } /*-----------------------------------------------------------*/ /* funnelledOverlap */ /* */ /* One MPI process sends single fixed length message to */ /* another MPI process. */ /* This other process then sends it back to the first */ /* process. */ /* All communication takes place within the OpenMP */ /* region for this benchmark. */ /*-----------------------------------------------------------*/ int funnelledOverlap(int totalReps, int dataSize){ int repIter, i; MPI_Request req; MPI_Status stat; /* Open parallel region for threads */ #pragma omp parallel \ private(i,repIter,req,stat) \ shared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \ shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \ shared(dataSize,globalIDarray,comm,totalReps,myMPIRank) { for (repIter=0; repIter< totalReps; repIter++){ /* All threads under MPI process with rank = pingRank * write to its part of the pingBuf array using a * parallel do directive. */ if (myMPIRank == pingRank){ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier at end of for takes care of synchronisation */ /*Master thread under ping process sends buffer to * MPI process with rank equal to pongRank. 
*/ #pragma omp master { MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, pongRank, TAG, comm, &req); fwq(100000,100); /* Master thread then waits for a message from pong process */ MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongRank, TAG, \ comm, &stat); /* Finish the Send operation with an MPI_Wait */ MPI_Wait(&req, &stat); } if(myThreadID != 0) fwq(100000,100); /* Barrier needed to wait for master thread to complete MPI_Recv */ #pragma omp barrier /*Each thread reads its part of received buffer */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pongRecvBuf[i]; } } else if (myMPIRank == pongRank){ /* Master thread under pongRank receives the message * from the ping process. */ #pragma omp master { MPI_Irecv(pingRecvBuf, sizeofBuffer, MPI_INT, pingRank, TAG, comm, &req); fwq(100000,100); MPI_Wait(&req, &stat); } if(myThreadID != 0) fwq(100000,100); /* Barrier needed to wait on master thread */ #pragma omp barrier /* Each thread under the pongRank MPI process now copies its part * of the received buffer to pongSendBuf. */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pongSendBuf[i] = pingRecvBuf[i]; } /* Implicit barrier at end of DO */ /* Master thread of pongRank process now sends pongSendBuf * to ping process. */ #pragma omp master { MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingRank, TAG, comm); } } } /* end of repetitions */ } /* end of parallel region */ return 0; } /*-----------------------------------------------------------*/ /* serializedOverlap */ /* */ /* With this algorithm multiple threads take place in the */ /* communication and computation. */ /* Each thread under the MPI ping process sends a portion */ /* of the message to the other MPI process. */ /* Each thread of the other process then sends it back to */ /* the first process. 
Only 1 thread calls into MPI at a time.*/ /*-----------------------------------------------------------*/ int serializedOverlap(int totalReps, int dataSize){ int repIter, i, lBound, uBound; MPI_Request req; MPI_Status stat; int done; /* Open parallel region for threads under pingRank */ #pragma omp parallel \ private(i,repIter,lBound,req,stat,done) \ shared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \ shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \ shared(dataSize,globalIDarray,comm,totalReps,myMPIRank) { for (repIter=0; repIter < totalReps; repIter++){ if (myMPIRank == pingRank){ /* Calculate lower bound of data array for the thread */ lBound = (myThreadID * dataSize); /* All threads under MPI process with rank = pingRank * write to their part of the pingBuf array using * a parallel for directive. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier not needed for multiple*/ /*Each thread under ping process sends dataSize items * to MPI process with rank equal to pongRank. * myThreadID is used as tag to ensure data goes to correct * place in recv buffer. */ #pragma omp critical { MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &req); } fwq(100000,100); done = 0; while (done != 1) { #pragma omp critical MPI_Test(&req, &done, &stat); } #pragma omp critical { /* Thread then waits for a message from pong process. 
*/ MPI_Irecv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &req); } done = 0; while (done != 1) { #pragma omp critical MPI_Test(&req, &done, &stat); } /* Each thread reads its part of the received buffer */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pongRecvBuf[i]; } } else if (myMPIRank == pongRank){ /* Calculate lower bound of the data array */ lBound = (myThreadID * dataSize); #pragma omp critical { /* Each thread under pongRank receives a message * from the ping process. */ MPI_Irecv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm, &req); } fwq(100000,100); done = 0; while (done != 1) { #pragma omp critical MPI_Test(&req, &done, &stat); } /* Each thread now copies its part of the received buffer * to pongSendBuf. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++) { pongSendBuf[i] = pingRecvBuf[i]; } #pragma omp critical { /* Each thread now sends pongSendBuf to ping process. */ MPI_Isend(&pongSendBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm, &req); } done = 0; while (done != 1) { #pragma omp critical MPI_Test(&req, &done, &stat); } } }/* end of repetitions */ } /* end of parallel region */ return 0; } /*-----------------------------------------------------------*/ /* multipleOverlap */ /* */ /* With this algorithm multiple threads take place in the */ /* communication and computation. */ /* Each thread under the MPI ping process sends a portion */ /* of the message to the other MPI process. */ /* Each thread of the other process then sends it back to */ /* the first process. 
*/ /*-----------------------------------------------------------*/ int multipleOverlap(int totalReps, int dataSize){ int repIter, i, lBound, uBound; MPI_Request req; MPI_Status stat; /* Open parallel region for threads under pingRank */ #pragma omp parallel \ private(i,repIter,lBound,req,stat) \ shared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \ shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \ shared(dataSize,globalIDarray,comm,totalReps,myMPIRank) { for (repIter=0; repIter < totalReps; repIter++){ if (myMPIRank == pingRank){ /* Calculate lower bound of data array for the thread */ lBound = (myThreadID * dataSize); /* All threads under MPI process with rank = pingRank * write to their part of the pingBuf array using * a parallel for directive. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier not needed for multiple*/ /*Each thread under ping process sends dataSize items * to MPI process with rank equal to pongRank. * myThreadID is used as tag to ensure data goes to correct * place in recv buffer. */ MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &req); fwq(100000,100); MPI_Wait(&req, &stat); /* Thread then waits for a message from pong process. */ MPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &stat); /* Each thread reads its part of the received buffer */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pongRecvBuf[i]; } } else if (myMPIRank == pongRank){ /* Calculate lower bound of the data array */ lBound = (myThreadID * dataSize); /* Each thread under pongRank receives a message * from the ping process. */ MPI_Irecv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm, &req); fwq(100000,100); MPI_Wait(&req, &stat); /* Each thread now copies its part of the received buffer * to pongSendBuf. 
*/ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++) { pongSendBuf[i] = pingRecvBuf[i]; } /* Each thread now sends pongSendBuf to ping process. */ MPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm); } }/* end of repetitions */ } /* end of parallel region */ return 0; } /*-----------------------------------------------------------*/ /* taskOverlap */ /* */ /* With this algorithm multiple tasks take place in the */ /* communication and computation. */ /* Each task under the MPI ping process sends a portion */ /* of the message to the other MPI process. */ /* Each task of the other process then sends it back to */ /* the first process. */ /*-----------------------------------------------------------*/ int taskOverlap(int totalReps, int dataSize){ int repIter, i, lBound, uBound; MPI_Request req; MPI_Status stat; /* Open parallel region for threads under pingRank */ #pragma omp parallel \ private(i,repIter,lBound,req,stat) \ shared(pingRank,pongRank,pingSendBuf,pingRecvBuf) \ shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \ shared(dataSize,globalIDarray,comm,totalReps,myMPIRank) { for (repIter=0; repIter < totalReps; repIter++){ if (myMPIRank == pingRank){ /* Calculate lower bound of data array for the thread */ lBound = (myThreadID * dataSize); /* All threads under MPI process with rank = pingRank * write to their part of the pingBuf array using * a parallel for directive. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ #pragma omp task pingSendBuf[i] = globalIDarray[myThreadID]; } #pragma omp taskwait /* Implicit barrier not needed for multiple*/ /*Each thread under ping process sends dataSize items * to MPI process with rank equal to pongRank. * myThreadID is used as tag to ensure data goes to correct * place in recv buffer. 
*/ #pragma omp task { MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &req); fwq(100000,100); MPI_Wait(&req, &stat); /* Thread then waits for a message from pong process. */ MPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongRank, \ myThreadID, comm, &stat); } /* Each thread reads its part of the received buffer */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ #pragma omp task finalRecvBuf[i] = pongRecvBuf[i]; } #pragma omp taskwait } else if (myMPIRank == pongRank){ /* Calculate lower bound of the data array */ lBound = (myThreadID * dataSize); /* Each thread under pongRank receives a message * from the ping process. */ #pragma omp task { MPI_Irecv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm, &req); fwq(100000,100); MPI_Wait(&req, &stat); } /* Each thread now copies its part of the received buffer * to pongSendBuf. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++) { #pragma omp task pongSendBuf[i] = pingRecvBuf[i]; } #pragma omp taskwait #pragma omp task /* Each thread now sends pongSendBuf to ping process. */ MPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingRank, \ myThreadID, comm); #pragma omp taskwait } }/* end of repetitions */ } /* end of parallel region */ return 0; } /*-----------------------------------------------------------*/ /* allocateData */ /* */ /* Allocates space for the main data arrays. */ /* Size of each array is specified by subroutine argument. 
*/ /*-----------------------------------------------------------*/ int allocateOverlapData(int sizeofBuffer){ pingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int)); pingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int)); pongSendBuf = (int *)malloc(sizeofBuffer * sizeof(int)); pongRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int)); finalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int)); return 0; } /*-----------------------------------------------------------*/ /* freeData */ /* */ /* Deallocates the storage space for the main data arrays. */ /*-----------------------------------------------------------*/ int freeOverlapData(){ free(pingSendBuf); free(pingRecvBuf); free(pongSendBuf); free(pongRecvBuf); free(finalRecvBuf); return 0; } /*-----------------------------------------------------------*/ /* testOverlap */ /* */ /* Verifies that the Ping Pong benchmark worked correctly. */ /*-----------------------------------------------------------*/ int testOverlap(int sizeofBuffer,int dataSize){ int i, testFlag; int *testBuf; /* PingRank process checks if overlap worked ok. */ if (myMPIRank == pingRank){ /* initialise testFlag to true (test passed) */ testFlag = TRUE; /* allocate space for the testBuf */ testBuf = (int *)malloc(sizeofBuffer * sizeof(int)); /* construct testBuf array with correct values. * These are the values that should be in finalRecvBuf. */ #pragma omp parallel for default(none) \ private(i) \ shared(testBuf,dataSize,sizeofBuffer,globalIDarray) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ testBuf[i] = globalIDarray[myThreadID]; } /* compare each element of testBuf and finalRecvBuf */ for (i=0; i<sizeofBuffer; i++){ if (testBuf[i] != finalRecvBuf[i]){ testFlag = FALSE; } } /* free space for testBuf */ free(testBuf); } /* pingRank broadcasts testFlag to the other processes */ MPI_Bcast(&testFlag, 1, MPI_INT, pingRank, comm); /* Master process sets the testOutcome using testFlag. 
*/ if (myMPIRank == 0){ setTestOutcome(testFlag); } return 0; }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) { for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(16*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(16*t3+Nx+12,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef 
LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
gridify-3.c
/* { dg-do compile } */ /* { dg-require-effective-target offload_hsa } */ /* { dg-options "-fopenmp -fdump-tree-omplower-details" } */ #define BLOCK_SIZE 16 void tiled_sgemm_tt(const int M, const int N, const int K, const float alpha, const float*A, const int LDA, const float*B, const int LDB, const float beta, float*C, const int LDC) { #pragma omp target teams map(to:A[M*K],B[K*N]) map(from:C[M*N]) #pragma omp distribute collapse(2) for (int C_row_start=0 ; C_row_start < M ; C_row_start+=BLOCK_SIZE) for (int C_col_start=0 ; C_col_start < N ; C_col_start+=BLOCK_SIZE) { float As[BLOCK_SIZE][BLOCK_SIZE]; float Bs[BLOCK_SIZE][BLOCK_SIZE]; float Cs[BLOCK_SIZE][BLOCK_SIZE]; int C_row, C_col; #pragma omp parallel for collapse(2) for (int row=0 ; row < BLOCK_SIZE ; row++) for (int col=0 ; col < BLOCK_SIZE ; col++) { Cs[row][col] = 0.0; } for (int kblock = 0; kblock < K ; kblock += BLOCK_SIZE ) { #pragma omp parallel for collapse(2) for (int row=0 ; row < BLOCK_SIZE ; row++) for (int col=0 ; col < BLOCK_SIZE ; col++) { C_row = C_row_start + row; C_col = C_col_start + col; if ((C_row < M) && (kblock + col < K)) As[row][col] = A[(C_row*LDA)+ kblock + col]; else As[row][col] = 0; if ((kblock + row < K) && C_col < N) Bs[row][col] = B[((kblock+row)*LDB)+ C_col]; else Bs[row][col] = 0; } #pragma omp parallel for collapse(2) for (int row=0 ; row < BLOCK_SIZE ; row++) for (int col=0 ; col < BLOCK_SIZE ; col++) { for (int e = 0; e < BLOCK_SIZE; ++e) Cs[row][col] += As[row][e] * Bs[e][col]; } } /* End for kblock .. */ #pragma omp parallel for collapse(2) for (int row=0 ; row < BLOCK_SIZE ; row++) for (int col=0 ; col < BLOCK_SIZE ; col++) { C_row = C_row_start + row; C_col = C_col_start + col; if ((C_row < M) && (C_col < N)) C[(C_row*LDC)+C_col] = alpha*Cs[row][col] + beta*C[(C_row*LDC)+C_col]; } } /* End distribute */ } /* { dg-final { scan-tree-dump "Target construct will be turned into a gridified HSA kernel" "omplower" } } */
MatrixFunctions.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#pragma once

#ifndef BK_MATRIXFUNCTIONS_H
#define BK_MATRIXFUNCTIONS_H

#include <algorithm>
#include <cassert>
#include <cmath>
#include <complex>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <random>

#include <bkMath/functions/equals_approx.h>
#include <bkMath/matrix/MatrixAlignment.h>
#include <bkMath/matrix/eigen_wrappers/QRDecomposition.h>
#include <bkMath/matrix/eigen_wrappers/SVDecomposition.h>
#include <bkMath/matrix/type_traits/matrix_traits.h>
#include <bkMath/matrix/type_traits/common_type.h>
#include <bkMath/matrix/type_traits/signed_type.h>
#include <bkTools/random/random_ct.h>
#include <bkTools/random/random.h>
#include <bkTypeTraits/complex_traits.h>
#include <bkTypeTraits/floating_point.h>

namespace bk::details
{
  /// @brief CRTP mixin that supplies common matrix operations to TDerived.
  ///
  /// TDerived is expected to provide element storage/access; this class adds
  /// the shared functionality on top of it. Constructors/destructor are
  /// protected and defaulted: the mixin is never instantiated on its own.
  template<typename TValue, typename TDerived> class MatrixFunctions
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = MatrixFunctions<TValue, TDerived>;
      using derived_type = TDerived;
    public:
      using value_type = TValue;

      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    protected:
      /// @{ -------------------------------------------------- CTOR
      constexpr MatrixFunctions() noexcept = default;
      constexpr MatrixFunctions(const self_type& other) noexcept = default;
      constexpr MatrixFunctions(self_type&& other) noexcept = default;
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~MatrixFunctions() noexcept = default;
      /// @}

      //====================================================================================================
      //===== GETTER
      //====================================================================================================
    private:
      /// @{ -------------------------------------------------- TO DERIVED
      //! CRTP downcast to the concrete matrix type.
      [[nodiscard]] constexpr derived_type* deriv() noexcept
      { return static_cast<derived_type*>(this); }

      [[nodiscard]] constexpr const derived_type* deriv() const noexcept
      { return static_cast<const derived_type*>(this); }

      //! const view of *this; used by &&-qualified overloads to dispatch to the const& version.
      [[nodiscard]] constexpr const self_type* const_this()
      { return static_cast<const self_type*>(this); }
      /// @}

      /// @{ -------------------------------------------------- HELPER: ASSERTIONS
      //! compile-time size check; trivially true unless both matrices are static.
      template<typename TMatrix>
      [[nodiscard]] static constexpr bool _matrix_is_same_size_static(const TMatrix&)
      { return bk::matrix_traits_comp<derived_type, TMatrix>::static_size_matches || !bk::matrix_traits_comp<derived_type, TMatrix>::are_both_static; }

      //! runtime size check; on mismatch prints both sizes (to std::cout) and returns false.
      template<typename TMatrix>
      [[nodiscard]] bool _matrix_is_same_size_runtime(const TMatrix& m) const
      {
          if (!bk::matrix_traits_comp(*deriv(), m).size_matches())
          {
              // critical section so concurrent failing asserts do not interleave their output
              #pragma omp critical
              { std::cout << " this matrix size: " << deriv()->num_rows() << " x " << deriv()->num_cols() << " ; other matrix size: " << m.num_rows() << " x " << m.num_cols() << std::endl; }

              return false;
          }

          return true;
      }
      /// @}

    public:
      /// @{ -------------------------------------------------- SIZE
      template<typename TMatrix>
      [[nodiscard]] constexpr bool has_same_size(const TMatrix& rhs) const noexcept
      { return deriv()->num_rows() == rhs.num_rows() && deriv()->num_cols() == rhs.num_cols(); }
      /// @}

      /// @{ -------------------------------------------------- ALIGNMENT
      [[nodiscard]] constexpr bool is_row_major() const noexcept
      { return deriv()->alignment() == MatrixAlignment::RowMajor; }

      [[nodiscard]] constexpr bool is_col_major() const noexcept
      { return deriv()->alignment() == MatrixAlignment::ColMajor; }

      template<typename TMatrix>
      [[nodiscard]] constexpr bool has_same_alignment(const TMatrix& rhs) const noexcept
      { return deriv()->alignment() == rhs.alignment(); }
      /// @}

      /// @{ -------------------------------------------------- ROWID/COLID <> LISTID
      //! get rowId from listId of internal data vector
      [[nodiscard]] static constexpr unsigned int rowId_from_listId(unsigned int listId, const unsigned int num_rows, const unsigned int num_cols, const bool is_colmajor) noexcept
      { return is_colmajor ? listId % num_rows : listId / num_cols; }

      [[nodiscard]] constexpr unsigned int rowId_from_listId(unsigned int listId) const
      { return rowId_from_listId(listId, deriv()->num_rows(), deriv()->num_cols(), this->is_col_major()); }

      //! get colId from listId of internal data vector
      [[nodiscard]] static constexpr unsigned int colId_from_listId(unsigned int listId, const unsigned int num_rows, const unsigned int num_cols, const bool is_colmajor) noexcept
      { return is_colmajor ? listId / num_rows : listId % num_cols; }

      [[nodiscard]] constexpr unsigned int colId_from_listId(unsigned int listId) const
      { return colId_from_listId(listId, deriv()->num_rows(), deriv()->num_cols(), this->is_col_major()); }

      //! convert rowId/colId to listId of internal data vector
      [[nodiscard]] static constexpr unsigned int listId_from_rowId_colId(const unsigned int rowId, const unsigned int colId, const unsigned int num_rows, const unsigned int num_cols, const bool is_colmajor) noexcept
      { return is_colmajor ? colId * num_rows + rowId : rowId * num_cols + colId; }

      [[nodiscard]] constexpr unsigned int listId_from_rowId_colId(unsigned int rowId, unsigned int colId) const noexcept
      { return listId_from_rowId_colId(rowId, colId, deriv()->num_rows(), deriv()->num_cols(), this->is_col_major()); }
      /// @}

      /// @{ -------------------------------------------------- GET COPY
      //! allocate same-typed matrix and copy all values
      template<typename TCopyValue = value_type>
      [[nodiscard]] constexpr auto copy() const
      { return typename derived_type::template self_template_type<TCopyValue>(*deriv()); }
      /// @}

      /// @{ -------------------------------------------------- HELPER: COPY BLOCK
    private:
      //! copy the inclusive block [fromRowId,toRowId] x [fromColId,toColId] into res at (0,0).
      template<typename TSubMatrix>
      constexpr void _copy_block(TSubMatrix& res, unsigned int fromRowId, unsigned int toRowId, unsigned int fromColId, unsigned int toColId) const
      {
          for (unsigned int r = fromRowId; r <= toRowId; ++r)
          {
              for (unsigned int c = fromColId; c <= toColId; ++c)
              { res(r - fromRowId, c - fromColId) = (*deriv())(r, c); }
          }
      }
    public:
      /// @}

      /// @{ -------------------------------------------------- GET SUB MATRIX
      //! compile-time block extraction; returns a static matrix of the block's size.
      template<unsigned int fromRowId, unsigned int toRowId, unsigned int fromColId, unsigned int toColId>
      [[nodiscard]] constexpr auto sub_matrix() const
      {
          static_assert(fromRowId <= toRowId && fromColId <= toColId, "invalid from/to ids");
          static_assert(bk::is_dynamic_matrix_v<derived_type> || (bk::is_static_matrix_v<derived_type> && toRowId < derived_type::RowsAtCompileTime() && toColId < derived_type::ColsAtCompileTime()), "invalid from/to ids");
          assert(toRowId < deriv()->num_rows() && toColId < deriv()->num_cols());

          using result_type = typename derived_type::template self_template_type<value_type, toRowId - fromRowId + 1, toColId - fromColId + 1>;
          result_type res;
          _copy_block(res, fromRowId, toRowId, fromColId, toColId);
          return res;
      }

      //! runtime block extraction; returns a dynamic matrix sized to the block.
      [[nodiscard]] constexpr auto sub_matrix(unsigned int fromRowId, unsigned int toRowId, unsigned int fromColId, unsigned int toColId) const
      {
          assert(fromRowId <= toRowId && fromColId <= toColId);
          assert(toRowId < deriv()->num_rows() && toColId < deriv()->num_cols());

          using result_type = typename derived_type::template self_template_type<value_type, -1, -1>;
          result_type res;
          res.set_size(toRowId - fromRowId + 1, toColId - fromColId + 1);
          _copy_block(res, fromRowId, toRowId, fromColId, toColId);
          return res;
      }
      /// @}

      /// @{ -------------------------------------------------- GET ROW
      template<unsigned int Id>
      [[nodiscard]] constexpr auto row() const
      { return this->row(Id); }

      //! extract row Id as a 1 x num_cols matrix (dynamic result is sized/aligned to match).
      [[nodiscard]] constexpr auto row(unsigned int Id) const
      {
          using result_type = typename derived_type::template self_template_type<value_type, 1, derived_type::ColsAtCompileTime(), derived_type::AlignmentAtCompileTime()>;
          result_type res;

          if constexpr (bk::is_dynamic_matrix_v<result_type>)
          {
              res.set_size(1, deriv()->num_cols());
              res.set_alignment(deriv()->alignment());
          }

          this->_copy_block(res, Id, Id, 0, deriv()->num_cols() - 1);
          return res;
      }
      /// @}

      /// @{ -------------------------------------------------- GET COL
      template<unsigned int Id>
      [[nodiscard]] constexpr auto col() const
      { return this->col(Id); }

      //! extract column Id as a num_rows x 1 matrix.
      [[nodiscard]] constexpr auto col(unsigned int Id) const
      {
          using result_type = typename derived_type::template self_template_type<value_type, derived_type::RowsAtCompileTime(), 1, derived_type::AlignmentAtCompileTime()>;
          result_type res;

          if constexpr (bk::is_dynamic_matrix_v<result_type>)
          {
              res.set_size(deriv()->num_rows(), 1);
              res.set_alignment(deriv()->alignment());
          }

          this->_copy_block(res, 0, deriv()->num_rows() - 1, Id, Id);
          return res;
      }
      /// @}

      /// @{ -------------------------------------------------- GET DIAGONAL
      //! Extract diagonal (x_ii) as col-vector starting from top left (0,0)
      [[nodiscard]] constexpr auto diagonal() const
      {
          using result_type = typename derived_type::template self_template_type<value_type, std::min(derived_type::RowsAtCompileTime(), derived_type::ColsAtCompileTime()), 1>;
          result_type res;

          if constexpr (bk::is_dynamic_matrix_v<result_type>)
          {
              res.set_size(std::min(deriv()->num_rows(), deriv()->num_cols()), 1);
              res.set_alignment(deriv()->alignment());
          }

          for (unsigned int i = 0; i < res.num_elements(); ++i)
          { res[i] = (*deriv())(i, i); }

          return res;
      }
      /// @}

      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] constexpr self_type& operator=(const self_type&) = default;
      [[maybe_unused]] constexpr self_type& operator=(self_type&&) noexcept = default;

      //! generic assignment forwards to set() (single value, matrix, or iterable).
      template<typename T>
      [[maybe_unused]] constexpr self_type& operator=(T&& rhs)
      {
          this->set(std::forward<T>(rhs));
          return *this;
      }

      template<typename T>
      [[maybe_unused]] constexpr self_type& operator=(std::initializer_list<T> rhs)
      { // if this overloading is not defined, a = {1,2,3,4} invokes a constructor call
          this->set(std::move(rhs));
          return *this;
      }
      /// @}

      /// @{ -------------------------------------------------- HELPER: RESIZE MATRIX TO THIS SIZE
      //! resize m (if dynamic) either to this matrix's size/alignment (default args) or to rows x cols.
      template<typename TMatrix>
      void constexpr _resize_if_dynamic([[maybe_unused]] TMatrix& m, [[maybe_unused]] int rows = -1, [[maybe_unused]] int cols = -1, [[maybe_unused]] MatrixAlignment alignment = MatrixAlignment::ColMajor) const
      {
          if constexpr (bk::is_dynamic_matrix_v<TMatrix>)
          {
              if (rows == -1 || cols == -1)
              {
                  m.set_size(deriv()->num_rows(), deriv()->num_cols());
                  m.set_alignment(deriv()->alignment());
              }
              else
              {
                  m.set_size(rows, cols);
                  m.set_alignment(alignment);
              }
          }
      }
      /// @}

      /// @{ -------------------------------------------------- HELPERS: SET TO VALUE(S)
    protected:
      //! write a single forwarded value at internal index I.
      template<unsigned int I, typename T>
      constexpr void _set(T&& x)
      { (*deriv())[I] = std::forward<T>(x); }

      //! recursive unpack: write x at index I, then the rest at I+1, I+2, ...
      template<unsigned int I, typename T, typename... TL>
      constexpr void _set(T&& x, TL&& ... xi)
      {
          _set<I>(std::forward<T>(x));
          _set<I + 1>(std::forward<TL>(xi)...);
      }
      /// @}

    public:
      /// @{ -------------------------------------------------- SET TO VALUE(S)
      //! set from: an element list (one argument per element), another matrix,
      //! an iterable class, or a single constant value (fills the matrix).
      template<typename T, typename... TL>
      constexpr void set(T&& x0, TL&& ... xi)
      {
          using T_ = std::decay_t<T>;
          constexpr unsigned int N = 1 + sizeof...(xi);

          if constexpr (N > 1 && !bk::is_matrix_v<T_>)
          {
              //------------------------------------------------------------------------------------------------------
              // parameters are single values
              //------------------------------------------------------------------------------------------------------
              static_assert(bk::is_dynamic_matrix_v<derived_type> || N == derived_type::NumElementsAtCompileTime(), "invalid number of arguments");
              assert((bk::is_dynamic_matrix_v<derived_type> || N == deriv()->num_elements()) && "invalid number of arguments");

              if constexpr (bk::is_dynamic_matrix_v<derived_type>)
              {
                  // this == dynamic matrix: resize if necessary
                  if (deriv()->num_elements() != N)
                  { deriv()->set_size(N); }
              }
              else
              {
                  // this == static matrix: check size
                  static_assert(derived_type::NumElementsAtCompileTime() == N, "SIZE MISMATCH");
              }

              _set<0U>(std::forward<T>(x0), std::forward<TL>(xi)...);
          }
          else if constexpr (N == 1)
          {
              //------------------------------------------------------------------------------------------------------
              // parameter is matrix or constant value
              //------------------------------------------------------------------------------------------------------
              if constexpr (bk::is_matrix_v<T_>)
              {
                  // parameter is matrix
                  if constexpr (bk::is_dynamic_matrix_v<derived_type>)
                  {
                      // this == dynamic matrix: resize if necessary
                      if (!this->has_same_size(x0))
                      { deriv()->set_size(x0.num_rows(),
x0.num_cols()); } } else { // this == static matrix: check size static_assert(_matrix_is_same_size_static(x0)); assert(_matrix_is_same_size_runtime(x0)); } // copy values if (this->has_same_alignment(x0)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = x0[i]; } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { (*deriv())(r, c) = x0(r, c); } } } } else if constexpr (std::is_class_v < T_ >) { std::copy(x0.begin(), x0.end(), deriv()->begin()); } else // parameter is constant value { this->set_constant(std::forward<T>(x0)); } } } //! set: single values are given in initializer list void set(std::initializer_list <value_type> rhs) { assert(deriv()->num_elements() == rhs.size() && "invalid number of elements in initializer list"); std::copy(rhs.begin(), rhs.end(), deriv()->begin()); } /// @} /// @{ -------------------------------------------------- SET CONSTANT constexpr void set_constant(const value_type x) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = x; } } constexpr void set_one() { this->set_constant(1); } constexpr void set_zero() { this->set_constant(0); } /// @} /// @{ -------------------------------------------------- SET IDENTTY //! 
set to identity matrix (all values on diagonal are 1, rest is 0) constexpr void set_identity() { this->set_zero(); for (unsigned int i = 0; i < std::min(deriv()->num_rows(), deriv()->num_cols()); ++i) { (*deriv())(i, i) = 1; } } /// @} /// @{ -------------------------------------------------- SET RANDOM void set_random_int(int rmin, int rmax) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = bk::random::make_int(rmin, rmax); } } void set_random_float(double rmin = 0.0, double rmax = 1.0) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = bk::random::make_double(rmin, rmax); } } private: template<int I, typename RandEng> constexpr void _set_random_ct() { if constexpr (I == 0) { (*deriv())[I] = RandEng::value; } else { (*deriv())[I] = RandEng::value; this->_set_random_ct<I - 1, typename RandEng::next>(); } } public: //! generates a compile-time random uint vector with values between rmin and rmax. /*! * Usage: Random_ct<__LINE__*__COUNTER__>(min, max) * * - __COUNTER__ is a compile-time macro that is increased on each occurrence in the code * - __LINE__ is a compile-time macro that returns the code line * - write __LINE__*__COUNTER__ explicitly; do not use it in a for loop! 
* - This will create three identical vectors: * for(int = 0; i < 3; ++i) Mat3i::Random_ct<0,10,__LINE__*__COUNTER__> * * - Instead, manually unroll the loop */ template<unsigned long long seedAdd> constexpr void set_random_ct() { this->_set_random_ct<derived_type::RowsAtCompileTime() * derived_type::ColsAtCompileTime() - 1, bk::randi<0UL, (1UL << 31) - 1, bk::seed_from_buildtime() + seedAdd>>(); } /// @} /// @{ -------------------------------------------------- MODIFY LOWER/UPPER TRIANGULAR MATRIX constexpr void set_lower_triangle_constant(value_type x) { for (unsigned int c = 0; c < deriv()->num_cols() - 1; ++c) { // num_cols()-1 since there is no element below (end,end) for (unsigned int r = c + 1; r < deriv()->num_rows(); ++r) { (*deriv())(r, c) = x; } } } constexpr void set_lower_triangle_zeros() { this->set_lower_triangle_constant(0); } constexpr void set_lower_triangle_ones() { this->set_lower_triangle_constant(1); } constexpr void set_upper_triangle_constant(value_type x) { for (unsigned int c = 1; c < deriv()->num_cols(); ++c) { // c starts at second col (c=1) since there is no element above (0,0) for (unsigned int r = 0; r < c; ++r) { (*deriv())(r, c) = x; } } } constexpr void set_upper_triangle_zeros() { this->set_upper_triangle_constant(0); } constexpr void set_upper_triangle_ones() { this->set_upper_triangle_constant(1); } /// @} /// @{ -------------------------------------------------- MODIFY DIAGONAL constexpr void set_diagonal_constant(value_type x) { for (unsigned int i = 0; i < std::min(deriv()->num_rows(), deriv()->num_cols()); ++i) { (*deriv())(i, i) = x; } } constexpr void set_diagonal_zeros() { this->set_diagonal_constant(0); } constexpr void set_diagonal_ones() { this->set_diagonal_constant(1); } template<typename TVec> constexpr void set_diagonal(const TVec& v) { assert(((v.num_rows() == 1 && v.num_cols() == std::min(deriv()->num_rows(), deriv()->num_cols())) || (v.num_rows() == std::min(deriv()->num_rows(), deriv()->num_cols()) && 
v.num_cols() == 1)) && "set diagonal: invalid parameter (wrong size)"); for (unsigned int i = 0; i < std::min(deriv()->num_rows(), deriv()->num_cols()); ++i) { (*deriv())(i, i) = v[i]; } } /// @} //==================================================================================================== //===== PROPERTIES //==================================================================================================== /// @{ -------------------------------------------------- IS SYMMETRIC [[nodiscard]] constexpr bool is_symmetric(double precision = _precision_of<double>()) const { if (!bk::matrix_traits(*deriv()).is_square()) { return false; } // iterate over lower triangular matrix for (unsigned int c = 0; c < deriv()->num_cols() - 1; ++c) { // num_cols()-1 since there is no element below (end,end) for (unsigned int r = c + 1; r < deriv()->num_rows(); ++r) { if (!bk::equals_approx((*deriv())(r, c), (*deriv())(c, r), precision)) { return false; } } } return true; } /// @} /// @{ -------------------------------------------------- IS DIAGONAL //! all elements except diagonal are zero [[nodiscard]] constexpr bool is_diagonal(double precision = _precision_of<double>()) const { return bk::matrix_traits(*deriv()).is_square() && is_lower_triangular(precision) && is_upper_triangular(precision); } /// @} /// @{ -------------------------------------------------- IS TRIANGULAR //! check if lower triangle is zero [[nodiscard]] constexpr bool is_upper_triangular(double precision = _precision_of<double>()) const { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { for (unsigned int r = c + 1; r < deriv()->num_rows(); ++r) { if (!bk::equals_approx((*deriv())(r, c), 0, precision)) { return false; } } } return true; } //! 
check if upper triangle is zero [[nodiscard]] constexpr bool is_lower_triangular(double precision = _precision_of<double>()) const { for (unsigned int c = 1; c < deriv()->num_cols(); ++c) { // c=1 since there is no element above (0,0) for (unsigned int r = 0; r < c; ++r) { if (!bk::equals_approx((*deriv())(r, c), 0, precision)) { return false; } } } return true; } [[nodiscard]] constexpr bool is_triangular(double precision = _precision_of<double>()) const { return is_upper_triangular(precision) || is_lower_triangular(precision); } /// @} /// @{ -------------------------------------------------- IS HESSENBERG //! check if lower triangle (except extra diagonal) is zero [[nodiscard]] constexpr bool is_upper_hessenberg(double precision = _precision_of<double>()) const { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { for (unsigned int r = c + 2; r < deriv()->num_rows(); ++r) { if (!bk::equals_approx((*deriv())(r, c), 0, precision)) { return false; } } } return true; } //! check if upper triangle (except extra diagonal) is zero [[nodiscard]] constexpr bool is_lower_hessenberg(double precision = _precision_of<double>()) const { for (unsigned int c = 1; c < deriv()->num_cols(); ++c) { // c=1 since there is no element above (0,0) for (unsigned int r = 0; r < c - 2; ++r) { // c-2 since non-zero (0,1) and (1,1) are valid if (!bk::equals_approx((*deriv())(r, c), 0, precision)) { return false; } } } return true; } [[nodiscard]] constexpr bool is_hessenberg(double precision = _precision_of<double>()) const { return is_upper_hessenberg(precision) || is_lower_hessenberg(precision); } /// @} /// @{ -------------------------------------------------- IS ORTHOGONAL [[nodiscard]] constexpr bool is_orthogonal(double precision = _precision_of<double>()) const { if (!bk::matrix_traits(*deriv()).is_square()) { return false; } for (unsigned int c = 0; c < deriv()->num_cols() - 1; ++c) { // for all but last cols for (unsigned int cr = c + 1; cr < deriv()->num_cols(); ++cr) { // for 
cols right of current col if (!bk::equals_approx(col(c).dot(col(cr)), 0, precision) /*not perpendicular*/) { return false; } } } return true; } [[nodiscard]] constexpr bool is_orthonormal(double precision = _precision_of<double>()) const { if (!bk::matrix_traits(*deriv()).is_square()) { return false; } for (unsigned int c = 0; c < deriv()->num_cols() - 1; ++c) // for all but last col { for (unsigned int cr = c + 1; cr < deriv()->num_cols(); ++cr) // for cols right of current col { const auto current_col = col(c); if (!bk::equals_approx(current_col.norm(), 1, precision) /*length not normalized*/ || !bk::equals_approx(current_col.dot(col(cr)), 0, precision) /*not perpendicular*/) { return false; } } } return true; } /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- SWAP MATRIX //! exchange values with other matrix /*! * Both matrices are intact afterwards. Swap is equivalent to an _internal function, * i.e. 
'this' will be changed by 'swap' */ template<typename TMatrix> constexpr void swap(TMatrix& other) { static_assert(_matrix_is_same_size_static(other)); assert(_matrix_is_same_size_runtime(other)); if (has_same_alignment(other)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { value_type temp = (*deriv())[i]; (*deriv())[i] = other[i]; other[i] = std::move(temp); } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { typename TMatrix::value_type temp = (*deriv())(r, c); (*deriv())(r, c) = other(r, c); other(r, c) = std::move(temp); } } } } /// @} /// @{ -------------------------------------------------- SWAP ROWS [[nodiscard]] constexpr auto swap_rows(unsigned int i, unsigned int k) const& { auto res = copy(); res.swap_rows_internal(i, k); return res; } [[nodiscard]] constexpr auto swap_rows(unsigned int i, unsigned int k)&& { swap_rows_internal(i, k); return std::move(*deriv()); } constexpr void swap_rows_internal(unsigned int i, unsigned int k) { assert(i < deriv()->num_rows() && k < deriv()->num_rows() && "swap_rows_internal: invalid row ids"); value_type temp = 0; for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { temp = (*deriv())(i, c); (*deriv())(i, c) = (*deriv())(k, c); (*deriv())(k, c) = temp; } } /// @} /// @{ -------------------------------------------------- SWAP COLS [[nodiscard]] constexpr auto swap_cols(unsigned int i, unsigned int k) const& { auto res = copy(); res.swap_cols_internal(i, k); return res; } [[nodiscard]] constexpr auto swap_cols(unsigned int i, unsigned int k)&& { swap_cols_internal(i, k); return std::move(*deriv()); } constexpr void swap_cols_internal(unsigned int i, unsigned int k) { assert(i >= 0 && k >= 0 && i < deriv()->num_cols() && k < deriv()->num_cols() && "swap_cols_internal: invalid col ids"); value_type temp = 0; for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { temp = (*deriv())(r, i); (*deriv())(r, i) = (*deriv())(r, k); 
(*deriv())(r, k) = temp; } } /// @} /// @{ -------------------------------------------------- NORMALIZE //! scales the vector to length 1 [[nodiscard]] constexpr auto normalize() const& { auto res = this->copy<bk::make_floating_point_t<value_type>>(); res /= norm(); return res; } [[nodiscard]] constexpr auto normalize()&& { if constexpr (bk::is_floating_point_v<value_type>) { normalize_internal(); return std::move(*deriv()); } else { return const_this()->normalize(); } } constexpr void normalize_internal() { static_assert(bk::is_floating_point_v<value_type>, "DO NOT USE NORMALIZE_INTERNAL() WITH INTEGRAL TYPES"); if (!bk::equals_approx(norm(), 0, _precision_of<double>())) { operator/=(norm()); } } /// @} /// @{ -------------------------------------------------- NEGATE //! Multiply each element by -1 [[nodiscard]] constexpr auto negate() const& { auto res = copy<bk::make_signed_t<value_type>>(); res.negate_internal(); return res; } [[nodiscard]] constexpr auto negate()&& { if constexpr (bk::is_signed_v<value_type>) { negate_internal(); return std::move(*deriv()); } else { return const_this()->negate(); } } constexpr void negate_internal() { static_assert(bk::is_signed_v<value_type>, "DO NOT USE NEGATE_INTERNAL() WITH UNSIGNED TYPES"); for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = -(*deriv())[i]; } } /// @} /// @{ -------------------------------------------------- TRANSPOSE [[nodiscard]] constexpr auto transpose() const& { if constexpr (bk::is_static_matrix_v<derived_type>) { using result_type = typename derived_type::template self_template_type<value_type, derived_type::ColsAtCompileTime(), derived_type::RowsAtCompileTime()>; result_type res; for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { res(c, r) = (*deriv())(r, c); } } return res; } else // if constexpr (bk::is_dynamic_matrix_v<derived_type>) { auto res = copy(); res.transpose_internal(); return res; } } 
//! rvalue overload: transpose in place when possible
[[nodiscard]] auto transpose()&&
{
    if constexpr (bk::is_dynamic_matrix_v<derived_type>)
    {
        transpose_internal();
        return std::move(*deriv());
    }
    else
    { return const_this()->transpose(); }
}

//! transpose in place; only valid for static square or dynamic matrices
constexpr void transpose_internal()
{
    static_assert(bk::is_static_square_matrix_v<derived_type> || bk::is_dynamic_matrix_v<derived_type>);

    if constexpr (bk::is_dynamic_matrix_v<derived_type>)
    {
        /* swapping the dimensions AND flipping the alignment reinterprets the
         * same linear element buffer as the transposed matrix — no element is
         * moved (assumes set_size keeps the buffer; TODO confirm) */
        deriv()->set_size(deriv()->num_cols(), deriv()->num_rows());
        deriv()->set_alignment(is_row_major() ? MatrixAlignment::ColMajor : MatrixAlignment::RowMajor);
    }
    else if constexpr (bk::is_static_square_matrix_v<derived_type>)
    {
        // swap each strictly-lower element with its mirrored upper element
        for (unsigned int r = 1; r < deriv()->num_rows(); ++r)
        { // r starts at 1 since there is no need to swap (0,0)
            for (unsigned int c = 0; c < r; ++c)
            {
                value_type temp = (*deriv())(r, c);
                (*deriv())(r, c) = (*deriv())(c, r);
                (*deriv())(c, r) = std::move(temp);
            }
        }
    }
}
/// @}

/// @{ -------------------------------------------------- DOT PRODUCT
//! sum of element-wise multiplications
/*!
 * NOTE(review): both operands are traversed via the linear index; this assumes
 * identical element ordering (alignment) in both matrices — the assert only
 * checks the element count. Confirm callers use this on vectors / matching
 * alignments.
 */
template<typename TMatrix>
[[nodiscard]] constexpr auto dot(const TMatrix& rhs) const -> bk::signed_common_type_t<value_type, typename TMatrix::value_type>
{
    static_assert(bk::matrix_traits_comp<derived_type, TMatrix>::static_num_elements_matches || !bk::matrix_traits_comp<derived_type, TMatrix>::are_both_static);
    assert(bk::matrix_traits_comp(*deriv(), rhs).num_elements_matches());

    using result_type = bk::signed_common_type_t<value_type, typename TMatrix::value_type>;

    result_type temp = 0;

    for (unsigned int i = 0; i < deriv()->num_elements(); ++i)
    { temp += (*deriv())[i] * rhs[i]; }

    return temp;
}

//! alias for dot product
template<typename TMatrix>
[[nodiscard]] constexpr auto inner_product(const TMatrix& rhs) const
{ return dot(rhs); }
/// @}

/// @{ -------------------------------------------------- NORM
//! maximum column sum where absolute of each element is used
[[nodiscard]] constexpr value_type norm1() const
{
    value_type n = 0;

    for (unsigned int c = 0; c < deriv()->num_cols(); ++c)
    {
        value_type temp = 0;

        for (unsigned int r = 0; r < deriv()->num_rows(); ++r)
        {
            // unsigned types cannot pass std::abs; their values are non-negative anyway
            if constexpr (bk::is_signed_v<value_type>)
            { temp += std::abs((*deriv())(r, c)); }
            else
            { temp += (*deriv())(r, c); }
        }

        n = std::max(n, temp);
    }

    return n;
}

//! maximum row sum where absolute of each element is used
[[nodiscard]] constexpr value_type normInf() const
{
    value_type n = 0;

    for (unsigned int r = 0; r < deriv()->num_rows(); ++r)
    {
        value_type temp = 0;

        for (unsigned int c = 0; c < deriv()->num_cols(); ++c)
        {
            if constexpr (bk::is_signed_v<value_type>)
            { temp += std::abs((*deriv())(r, c)); }
            else
            { temp += (*deriv())(r, c); }
        }

        n = std::max(n, temp);
    }

    return n;
}

//! square root of sum of all elements squared (default)
[[nodiscard]] constexpr auto norm2() const
{ return std::sqrt(norm2_squared()); }

[[nodiscard]] constexpr value_type norm2_squared() const
{
    value_type n = 0;

    for (unsigned int i = 0; i < deriv()->num_elements(); ++i)
    {
        // additional abs() is adjustment for complex value_type
        if constexpr (bk::is_signed_v<value_type>)
        { n += std::abs((*deriv())[i]) * std::abs((*deriv())[i]); }
        else
        { n += (*deriv())[i] * (*deriv())[i]; }
    }

    return n;
}

//! alias for norm2(_squared)
[[nodiscard]] constexpr auto norm() const
{ return norm2(); }

[[nodiscard]] constexpr value_type norm_squared() const
{ return norm2_squared(); }
/// @}

/// @{ -------------------------------------------------- NORMALIZE COLS
//! normalize each column/row vector to length 1
[[nodiscard]] constexpr auto normalize_cols() const&
{
    auto res = copy<bk::make_floating_point_t<value_type>>();
    res.normalize_cols_internal();
    return res;
}

[[nodiscard]] constexpr auto normalize_cols()&&
{
    if constexpr (bk::is_floating_point_v<value_type>)
    {
        normalize_cols_internal();
        return std::move(*deriv());
    }
    else
    { return const_this()->normalize_cols(); }
}

constexpr void normalize_cols_internal()
{
    // NOTE(review): unlike normalize_internal(), a zero-norm column is NOT
    // guarded here and yields inf/NaN elements — confirm this is intended
    for (unsigned int c = 0; c < deriv()->num_cols(); ++c)
    {
        const auto n = this->col(c).norm();

        for (unsigned int r = 0; r < deriv()->num_rows(); ++r)
        { (*deriv())(r, c) /= n; }
    }
}
/// @}

/// @{ -------------------------------------------------- NORMALIZE ROWS
[[nodiscard]] constexpr auto normalize_rows() const&
{
    auto res = copy<bk::make_floating_point_t<value_type>>();
    res.normalize_rows_internal();
    return res;
}

[[nodiscard]] constexpr auto normalize_rows()&&
{
    if constexpr (bk::is_floating_point_v<value_type>)
    {
        normalize_rows_internal();
        return std::move(*deriv());
    }
    else
    { return const_this()->normalize_rows(); }
}

constexpr void normalize_rows_internal()
{
    // NOTE(review): zero-norm rows are not guarded (see normalize_cols_internal)
    for (unsigned int r = 0; r < deriv()->num_rows(); ++r)
    {
        const auto n = this->row(r).norm();

        for (unsigned int c = 0; c < deriv()->num_cols(); ++c)
        { (*deriv())(r, c) /= n; }
    }
}
/// @}

/// @{ -------------------------------------------------- RANK
//! determines the rank of the matrix using QR decomposition (counting zero-rows in R)
[[nodiscard]] int rank_via_qr() const
{ return qr().rank(); }

[[nodiscard]] int rank_via_qr(double tolerance) const
{ return qr().rank(tolerance); }

//! determines the rank via SVD (counting non-zero singular values)
[[nodiscard]] int rank_via_svd() const
{ return svd().rank(); }

[[nodiscard]] int rank_via_svd(double tolerance) const
{ return svd().rank(tolerance); }

//! default rank: delegates to the SVD-based version
[[nodiscard]] int rank() const
{ return rank_via_svd(); }

[[nodiscard]] int rank(double tolerance) const
{ return rank_via_svd(tolerance); }
/// @}

/// @{ -------------------------------------------------- QR DECOMPOSITION
/*!
 * Decompose the matrix so that A = Q*R, where Q is orthogonal and R is an upper triangular matrix.
 * Can be used to solve linear equation systems.
 *
 * \return Object that contains Q/R and allows to solve regarding rhs vector
 */
[[nodiscard]] auto qr() const
{ return QRDecomposition<derived_type>(*deriv()); }
/// @}

/// @{ -------------------------------------------------- SV DECOMPOSITION
/*!
 * Decompose the matrix so that A[r,c] = U[r,r] * S[r,c] * V[c,c].transpose().
 *
 * U are the eigenvectors of A*A.transpose() and V are the eigenvectors of A.transpose()*A.
 * Both have the same eigenvalues. S contains the sqrt of these eigenvalues in descending order
 * so that the largest eigenvalue is at (0,0)
 *
 * https://www.ling.ohio-state.edu/~kbaker/pubs/Singular_TValueDecomposition_Tutorial.pdf
 */
[[nodiscard]] auto svd(bool computeFullU = false, bool computeFullV = false) const
{ return SVDecomposition<derived_type>(*deriv(), computeFullU, computeFullV); }
/// @}

/// @{ -------------------------------------------------- PSEUDO INVERSE
//! pseudo inverse via svd
/*!
 * Singular values below the given precision are treated as zero.
 */
[[nodiscard]] auto pseudo_inverse(bk::make_floating_point_t<value_type> precision = _precision_of<bk::make_floating_point_t<value_type>>()) const
{ return svd(true, true).pseudo_inverse(precision); }
/// @}

/// @{ -------------------------------------------------- CONDITION
//! condition number via svd
/*!
 * Calculates the ratio of the largest and the smallest (non-zero) singular value.
 * Lower conditions (min = 0) are better
 */
auto condition(bk::make_floating_point_t<value_type> precision = _precision_of<bk::make_floating_point_t<value_type>>()) const
{ return svd().condition(precision); }
/// @}

/// @{ -------------------------------------------------- INTERPOLATE LINEAR
//! (1-alpha)*this + alpha*rhs
/*!
 * The weight should be between 0 and 1. However, other values
 * are also accepted which yields extrapolation
 */
template<typename TMatrix>
constexpr void interpolate_linear_internal(const TMatrix& rhs, double alpha)
{
    static_assert(_matrix_is_same_size_static(rhs));
    assert(_matrix_is_same_size_runtime(rhs));

    if (has_same_alignment(rhs))
    {
        // identical element order -> blend via the linear index
        for (unsigned int i = 0; i < deriv()->num_elements(); ++i)
        {
            (*deriv())[i] *= 1.0 - alpha;
            (*deriv())[i] += alpha * rhs[i];
        }
    }
    else
    {
        // different alignment -> blend element-wise via (row,col) access
        for (unsigned int r = 0; r < deriv()->num_rows(); ++r)
        {
            for (unsigned int c = 0; c < deriv()->num_cols(); ++c)
            {
                (*deriv())(r, c) *= 1.0 - alpha;
                (*deriv())(r, c) += alpha * rhs(r, c);
            }
        }
    }
}

template<typename TMatrix>
[[nodiscard]] constexpr auto interpolate_linear(const TMatrix& rhs, double alpha) const&
{
    auto res = this->copy<bk::make_floating_point_t<value_type>>();
    res.interpolate_linear_internal(rhs, alpha);
    return res;
}

//! rvalue overload: interpolates in place when value_type is floating point
template<typename TMatrix>
[[nodiscard]] constexpr auto interpolate_linear(const TMatrix& rhs, double alpha)&&
{
    if constexpr (bk::is_floating_point_v<value_type>)
    {
        interpolate_linear_internal(rhs, alpha);
        return std::move(*deriv());
    }
    else
    { return const_this()->interpolate_linear(rhs, alpha); }
}
/// @}

//====================================================================================================
//===== COMPARISON
//====================================================================================================
/// @{ -------------------------------------------------- EQUALITY
//! exact element-wise equality (no epsilon); differing sizes compare unequal
template<typename TMatrix>
[[nodiscard]] constexpr bool operator==(const TMatrix& other) const
{
    static_assert(bk::is_matrix_v<TMatrix>);

    const unsigned int r = deriv()->num_rows();
    const unsigned int c = deriv()->num_cols();

    if (r != other.num_rows() || c != other.num_cols())
    { return false; }

    for (unsigned int ri = 0; ri < r; ++ri)
    {
        for (unsigned int ci = 0; ci < c; ++ci)
        {
            if ((*deriv())(ri, ci) != other(ri, ci))
            { return false; }
        }
    }

    return true;
}

template<typename TMatrix>
[[nodiscard]] constexpr bool
operator!=(const TMatrix& other) const { return !operator==(other); } /// @} private: /// @{ -------------------------------------------------- HELPER: GENERIC COMPARISON template<typename TScalar, typename TRes, typename TCompareFunction> constexpr void _compare_to_scalar(const TScalar& rhs, TRes& res, TCompareFunction cmp) const { using common_value_type = bk::signed_common_type_t<value_type, TScalar>; using res_value_type = typename std::decay_t<TRes>::value_type; for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { res[i] = static_cast<res_value_type>(cmp(static_cast<common_value_type>((*deriv())[i]), static_cast<common_value_type>(rhs))); } } template<typename TMatrix, typename TRes, typename TCompareFunction> constexpr void _compare_to_matrix(const TMatrix& rhs, TRes& res, TCompareFunction cmp) const { static_assert(_matrix_is_same_size_static(rhs)); assert(_matrix_is_same_size_runtime(rhs)); using common_value_type = bk::signed_common_type_t<value_type, typename std::decay_t<TMatrix>::value_type>; using res_value_type = typename std::decay_t<TRes>::value_type; if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { res[i] = static_cast<res_value_type>(cmp(static_cast<common_value_type>((*deriv())[i]), static_cast<common_value_type>(rhs[i]))); } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { res(r, c) = static_cast<res_value_type>(cmp(static_cast<common_value_type>((*deriv())(r, c)), static_cast<common_value_type>(rhs(r, c)))); } } } } /// @} public: /// @{ -------------------------------------------------- EQUAL CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_equal_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) 
{ _compare_to_scalar(rhs, res, std::equal_to<value_type>()); } else if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::equal_to<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- NOTM EQUAL CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_not_equal_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) { _compare_to_scalar(rhs, res, std::not_equal_to<value_type>()); } else if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::not_equal_to<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- LESSER CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_lesser_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) { _compare_to_scalar(rhs, res, std::less<value_type>()); } else if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::less<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- LESSER EQUAL CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_lesser_equal_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) { _compare_to_scalar(rhs, res, std::less_equal<value_type>()); } else if 
constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::less_equal<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- GREATER CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_greater_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) { _compare_to_scalar(rhs, res, std::greater<value_type>()); } else if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::greater<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- GREATER EQUAL CWISE template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto is_greater_equal_cwise(const TMatrixOrScalar& rhs) const { using result_type = typename derived_type::template self_template_type<bool>; result_type res; _resize_if_dynamic(res); if constexpr (std::is_arithmetic_v < TMatrixOrScalar > || bk::is_complex_v<TMatrixOrScalar>) { _compare_to_scalar(rhs, res, std::greater_equal<value_type>()); } else if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { _compare_to_matrix(rhs, res, std::greater_equal<value_type>()); } else { res.set_constant(false); } return res; } /// @} /// @{ -------------------------------------------------- MIN CWISE template<typename TMatrixOrScalar> constexpr void min_cwise_internal(const TMatrixOrScalar& rhs) { if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::min((*deriv())[i], static_cast<value_type>(rhs[i])); } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < 
deriv()->num_cols(); ++c) { (*deriv())(r, c) = std::min((*deriv())(r, c), static_cast<value_type>(rhs(r, c))); } } } } else { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::min((*deriv())[i], static_cast<value_type>(rhs)); } } } template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto min_cwise(const TMatrixOrScalar& rhs) const { using T = std::conditional_t<bk::is_matrix_v<TMatrixOrScalar>, typename TMatrixOrScalar::value_type, TMatrixOrScalar>; auto res = copy<bk::signed_common_type_t<value_type, T>>(); res.min_cwise_internal(rhs); return res; } /// @} /// @{ -------------------------------------------------- MAX CWISE template<typename TMatrixOrScalar> constexpr void max_cwise_internal(const TMatrixOrScalar& rhs) { if constexpr (bk::is_matrix_v<TMatrixOrScalar>) { assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::max((*deriv())[i], static_cast<value_type>(rhs[i])); } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { (*deriv())(r, c) = std::max((*deriv())(r, c), static_cast<value_type>(rhs(r, c))); } } } } else { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::max((*deriv())[i], static_cast<value_type>(rhs)); } } } template<typename TMatrixOrScalar> [[nodiscard]] constexpr auto max_cwise(const TMatrixOrScalar& rhs) const { using T = std::conditional_t<bk::is_matrix_v<TMatrixOrScalar>, typename TMatrixOrScalar::value_type, TMatrixOrScalar>; auto res = copy<bk::signed_common_type_t<value_type, T>>(); res.max_cwise_internal(rhs); return res; } /// @} //==================================================================================================== //===== MATH OPERATORS //==================================================================================================== /// @{ 
-------------------------------------------------- ADD SCALAR OR COMPLEX template<typename T> constexpr void add_scalar_cwise_internal(const T& rhs) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] += rhs; } } /// @} /// @{ -------------------------------------------------- ADD MATRIX template<typename TMatrix> constexpr void add_matrix_cwise_internal(const TMatrix& rhs) { static_assert(_matrix_is_same_size_static(rhs)); assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] += rhs[i]; } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { (*deriv())(r, c) += rhs(r, c); } } } } /// @} /// @{ -------------------------------------------------- OPERATOR + template<typename T> [[nodiscard]] constexpr auto operator+(T&& rhs) const& { using T_ = std::decay_t<T>; if constexpr (bk::is_matrix_v<T_>) { if constexpr (!std::is_lvalue_reference_v < T > && !bk::is_ref_matrix_v<T_> && !std::is_const_v < T > && bk::is_signed_common_type_v<typename T_::value_type, value_type>) { /* * const this& + Matrix&& (non-const non-ref common_t) -> rhs&& */ rhs.add_matrix_cwise_internal(*deriv()); return rhs; } else { using common_value_type = bk::signed_common_type_t<value_type, typename T_::value_type>; auto res = copy<common_value_type>(); res.add_matrix_cwise_internal(rhs); return res; } } else { using common_value_type = bk::signed_common_type_t<value_type, T_>; auto res = copy<common_value_type>(); res.add_scalar_cwise_internal(rhs); return res; } } template<typename T> [[nodiscard]] constexpr auto operator+(T&& rhs)&& { using T_ = std::decay_t<T>; if constexpr (bk::is_matrix_v<T_>) { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_signed_common_type_v<value_type, typename T_::value_type>) { /* * this&& (non-const non-ref common_t) + Matrix -> this&& */ add_matrix_cwise_internal(rhs); 
return std::move(*deriv()); } } else if constexpr (bk::is_signed_common_type_v<value_type, T_>) { /* * this&& (non-const non-ref common_t) + (scalar or complex) -> this&& */ add_scalar_cwise_internal(rhs); return std::move(*deriv()); } return const_this()->operator+(std::forward<T>(rhs)); } /// @} /// @{ -------------------------------------------------- OPERATOR += template<typename T> constexpr void operator+=(T&& rhs) { if constexpr (bk::is_matrix_v<std::decay_t < T>>) { add_matrix_cwise_internal(rhs); } else { add_scalar_cwise_internal(rhs); } } /// @} /// @{ -------------------------------------------------- SUBTRACT SCALAR OR COMPLEX template<typename T> constexpr void subtract_scalar_internal(const T& rhs) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] -= rhs; } } /// @} /// @{ -------------------------------------------------- SUBTRACT MATRIX template<typename TMatrix> constexpr void subtract_matrix_internal(const TMatrix& rhs) { static_assert(_matrix_is_same_size_static(rhs)); assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] -= rhs[i]; } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { (*deriv())(r, c) -= rhs(r, c); } } } } /// @} /// @{ -------------------------------------------------- OPERATOR - [[nodiscard]] constexpr auto operator-() const& { return negate(); } [[nodiscard]] constexpr auto operator-()&& { if constexpr (bk::is_signed_common_type_v<value_type, value_type, true /*enforceSign*/>) { negate_internal(); return std::move(*deriv()); } else { return const_this()->operator-(); } } template<typename T> [[nodiscard]] constexpr auto operator-(T&& rhs) const& { using T_ = std::decay_t<T>; if constexpr (bk::is_matrix_v<T_>) { if constexpr (!std::is_lvalue_reference_v < T > && !bk::is_ref_matrix_v<T> && !std::is_const_v < T > && 
bk::is_signed_common_type_v<typename T_::value_type, value_type, true /*enforceSign*/>) { /* * const this& - Matrix&& (non-const non-ref common_t) -> rhs&& */ rhs.negate_internal(); rhs.add_matrix_cwise_internal(*deriv()); return rhs; } else { using common_value_type = bk::signed_common_type_t<value_type, typename T_::value_type, true /*enforceSign*/>; auto res = copy<common_value_type>(); res.subtract_matrix_internal(rhs); return res; } } else { using common_value_type = bk::signed_common_type_t<value_type, T_, true /*enforceSign*/>; auto res = copy<common_value_type>(); res.subtract_scalar_internal(rhs); return res; } } template<typename T> [[nodiscard]] constexpr auto operator-(T&& rhs)&& { using T_ = std::decay_t<T>; if constexpr (!bk::is_ref_matrix_v<derived_type>) { if constexpr (bk::is_matrix_v<T_> && bk::is_signed_common_type_v<value_type, typename T_::value_type, true /*enforceSign*/>) { /* * this&& (non-const non-ref common_t) - Matrix -> this&& */ subtract_matrix_internal(std::forward<T>(rhs)); return std::move(*deriv()); } else if constexpr (bk::is_signed_common_type_v<value_type, T_, true /*enforceSign*/>) { /* * this&& (non-const non-ref common_t) - (scalar or complex) -> this&& */ subtract_scalar_internal(std::forward<T>(rhs)); return std::move(*deriv()); } } return const_this()->operator-(std::forward<T>(rhs)); } /// @} /// @{ -------------------------------------------------- OPERATOR -= template<typename T> constexpr void operator-=(const T& rhs) { if constexpr (bk::is_matrix_v<T>) { subtract_matrix_internal(rhs); } else { subtract_scalar_internal(rhs); } } /// @} /// @{ -------------------------------------------------- MULTIPLY SCALAR OR COMPLEX template<typename T> constexpr void mult_scalar_internal(const T& rhs) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] *= rhs; } } /// @} /// @{ -------------------------------------------------- MULTIPLY MATRIX private: template<typename TMatrix> [[nodiscard]] constexpr auto 
_mult_matrix(const TMatrix& rhs) const { static_assert(bk::is_dynamic_matrix_v<derived_type> || bk::is_dynamic_matrix_v<TMatrix> || (derived_type::ColsAtCompileTime() == TMatrix::RowsAtCompileTime()), "invalid matrix sizes for multiplication"); using common_value_type = bk::signed_common_type_t<value_type, typename TMatrix::value_type>; using result_type = typename derived_type::template self_template_type<common_value_type, derived_type::RowsAtCompileTime(), TMatrix::ColsAtCompileTime()>; result_type res; _resize_if_dynamic(res, deriv()->num_rows(), rhs.num_cols()); res.set_zero(); for (unsigned int c = 0; c < res.num_cols(); ++c) { for (unsigned int r = 0; r < res.num_rows(); ++r) { for (unsigned int i = 0; i < deriv()->num_cols(); ++i) { res(r, c) += (*deriv())(r, i) * rhs(i, c); } } } return res; } public: /// @} /// @{ -------------------------------------------------- OPERATOR * template<typename T> [[nodiscard]] constexpr auto operator*(const T& rhs) const& { if constexpr (bk::is_matrix_v<T>) { return _mult_matrix(rhs); } else { using common_value_type = bk::signed_common_type_t<value_type, T>; auto res = copy<common_value_type>(); res.mult_scalar_internal(rhs); return res; } } template<typename T> [[nodiscard]] constexpr auto operator*(const T& rhs)&& { if constexpr (!bk::is_matrix_v<T> && !bk::is_ref_matrix_v<derived_type> && bk::is_signed_common_type_v<value_type, T>) { mult_scalar_internal(rhs); return std::move(*deriv()); } else { return const_this()->operator*(rhs); } } /// @} /// @{ -------------------------------------------------- MULTIPLY MATRIX CWISE template<typename T> constexpr void mult_matrix_cwise_internal(const T& rhs) { static_assert(_matrix_is_same_size_static(rhs)); assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] *= rhs[i]; } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < 
deriv()->num_cols(); ++c) { (*deriv())(r, c) *= rhs(r, c); } } } } /// @} /// @{ -------------------------------------------------- MULTIPLY SCALAR CWISE constexpr void mult_scalar_cwise_internal(value_type rhs) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] *= rhs; } } /// @} /// @{ -------------------------------------------------- MULTIPLY CWISE template<typename T> [[nodiscard]] constexpr auto mult_cwise(T&& rhs) const& { using T_ = std::decay_t<T>; if constexpr (bk::is_matrix_v<T_>) { if constexpr (!std::is_lvalue_reference_v < T > && !bk::is_ref_matrix_v<T_> && !std::is_const_v < T > && bk::is_signed_common_type_v<typename T_::value_type, value_type>) { /* * const this& .* Matrix&& (non-const non-ref common_t) -> rhs&& */ rhs.mult_matrix_cwise_internal(*deriv()); return rhs; } else { using common_value_type = bk::signed_common_type_t<value_type, typename T_::value_type>; auto res = copy<common_value_type>(); res.mult_matrix_cwise_internal(rhs); return res; } } else { using common_value_type = bk::signed_common_type_t<value_type, T_>; auto res = copy<common_value_type>(); res.mult_scalar_cwise_internal(rhs); return res; } } template<typename T> [[nodiscard]] constexpr auto mult_cwise(T&& rhs)&& { using T_ = std::decay_t<T>; if constexpr (bk::is_matrix_v<T_> && !bk::is_ref_matrix_v<derived_type> && is_common_type_v<value_type, typename T_::value_type>) { /* * this&& (non-const non-ref common_t) .* Matrix -> this&& */ mult_matrix_cwise_internal(rhs); return std::move(*deriv()); } else { return const_this()->mult_cwise(std::forward<T>(rhs)); } } /// @} /// @{ -------------------------------------------------- OPERATOR *= constexpr void operator*=(value_type rhs) { mult_scalar_internal(rhs); } /// @} /// @{ -------------------------------------------------- DIVIDE CWISE BY MATRIX template<typename T> constexpr void div_matrix_cwise_internal(const T& rhs) { static_assert(_matrix_is_same_size_static(rhs)); 
assert(_matrix_is_same_size_runtime(rhs)); if (has_same_alignment(rhs)) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] /= rhs[i]; } } else { for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { (*deriv())(r, c) /= rhs(r, c); } } } } /// @} /// @{ -------------------------------------------------- DIVIDE CWISE BY SCALAR OR COMPLEX constexpr void div_scalar_cwise_internal(value_type rhs) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] /= rhs; } } /// @} /// @{ -------------------------------------------------- DIV CWISE template<typename T> [[nodiscard]] constexpr auto div_cwise(const T& rhs) const& { using common_value_type = std::conditional_t<bk::is_matrix_v<T>, signed_common_float_t<value_type, typename T::value_type>, signed_common_float_t<value_type, T>>; auto res = copy<common_value_type>(); if constexpr (bk::is_matrix_v<T>) { res.div_matrix_cwise_internal(rhs); } else { res.div_scalar_cwise_internal(rhs); } return res; } template<typename T> [[nodiscard]] constexpr auto div_cwise(const T& rhs)&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { if constexpr (bk::is_matrix_v<T>&& is_signed_common_float_v<value_type, typename T::value_type>) { /* * this&& (non-const non-ref floating_point) ./ Matrix -> this&& */ div_matrix_cwise_internal(rhs); return std::move(*deriv()); } else if constexpr (is_signed_common_float_v < value_type, value_type >) { /* * this&& (non-const non-ref floating_point) ./ scalar -> this&& */ div_scalar_cwise_internal(rhs); return std::move(*deriv()); } } else { return const_this()->div_cwise(rhs); } } /// @} /// @{ -------------------------------------------------- OPERATOR / [[nodiscard]] constexpr auto operator/(value_type rhs) const& { using common_value_type = bk::make_floating_point_t<value_type>; auto res = copy<common_value_type>(); res.div_scalar_cwise_internal(rhs); return res; } [[nodiscard]] constexpr auto 
operator/(value_type rhs)&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { div_scalar_cwise_internal(rhs); return std::move(*deriv()); } else { return const_this()->operator/(rhs); } } /// @} /// @{ -------------------------------------------------- OPERATOR /= constexpr void operator/=(value_type rhs) { div_scalar_cwise_internal(rhs); } /// @} //==================================================================================================== //===== MISC MATH FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- PRODUCT //! product of all elements constexpr value_type product() const { value_type s = 1; for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { s *= (*deriv())[i]; } return s; } /// @} /// @{ -------------------------------------------------- SUM //! sum of all elements constexpr value_type sum() const { value_type s = 0; for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { s += (*deriv())[i]; } return s; } /// @} /// @{ -------------------------------------------------- HELPER: SUM OF COLS private: constexpr value_type _sum_of_col(unsigned int colId) const { value_type s = 0; for (unsigned int r = 0; r < deriv()->num_rows(); ++r) { s += (*deriv())(r, colId); } return s; } public: /// @} /// @{ -------------------------------------------------- SUM OF CUSTOM COLS //! Calculates the sum of all elements in the specified columns. /*! * - duplicate colIds are not checked */ template<typename... TUInts> constexpr value_type sum_of_cols(TUInts... 
colIds) const { return (_sum_of_col(colIds) + ...); } /// @} /// @{ -------------------------------------------------- HELPER: SUM OF ROWS private: constexpr value_type _sum_of_row(unsigned int rowId) const { value_type s = 0; for (unsigned int c = 0; c < deriv()->num_cols(); ++c) { s += (*deriv())(rowId, c); } return s; } public: /// @} /// @{ -------------------------------------------------- SUM OF CUSTOM ROWS //! Calculates the sum of all elements in the specified rows. /*! * - duplicate rowIds are not checked */ template<typename... TUInts> constexpr value_type sum_of_rows(TUInts... rowIds) const { return (_sum_of_row(rowIds) + ...); } /// @} /// @{ -------------------------------------------------- MEAN //! mean / average of all elements [[nodiscard]] constexpr auto mean() const { using float_type = bk::make_floating_point_t<value_type>; return static_cast<float_type>(sum()) / static_cast<float_type>(deriv()->num_elements()); } /// @} /// @{ -------------------------------------------------- VARIANCE [[nodiscard]] constexpr auto variance() const { const auto m = mean(); using float_type = std::decay_t<decltype(m)>; if constexpr (bk::is_floating_point_v<value_type>) { float_type s = 0; for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { const float_type diff = static_cast<float_type>((*deriv())[i]) - m; s += diff * diff; } return s / static_cast<float_type>(deriv()->num_elements()); } } /// @} /// @{ -------------------------------------------------- STDEV [[nodiscard]] constexpr auto stdev() const { return std::sqrt(variance()); } /// @} /// @{ -------------------------------------------------- minimum element [[nodiscard]] constexpr auto min_element() const { return std::min_element(deriv()->begin(), deriv()->end()); } /// @} /// @{ -------------------------------------------------- MAXIMUM ELEMENT [[nodiscard]] constexpr auto max_element() const { return std::max_element(deriv()->begin(), deriv()->end()); } /// @} /// @{ 
-------------------------------------------------- ABS CWISE constexpr void abs_cwise_internal() { if constexpr (bk::is_signed_v<value_type>) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { if constexpr (bk::is_signed_v<value_type>) { (*deriv())[i] = std::abs((*deriv())[i]); } else { (*deriv())[i] = (*deriv())[i]; } } } } [[nodiscard]] constexpr auto abs_cwise() const& { auto res = copy(); res.abs_cwise_internal(); return res; } [[nodiscard]] constexpr auto abs_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { abs_cwise_internal(); return std::move(*deriv()); } else { return const_this()->abs_cwise(); } } /// @} /// @{ -------------------------------------------------- CBRT CWISE constexpr void cbrt_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::cbrt((*deriv())[i]); } } [[nodiscard]] constexpr auto cbrt_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.cbrt_cwise_internal(); return res; } [[nodiscard]] constexpr auto cbrt_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { cbrt_cwise_internal(); return std::move(*deriv()); } else { return const_this()->cbrt_cwise(); } } /// @} /// @{ -------------------------------------------------- EXP CWISE constexpr void exp_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::exp((*deriv())[i]); } } [[nodiscard]] constexpr auto exp_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.exp_cwise_internal(); return res; } [[nodiscard]] constexpr auto exp_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { exp_cwise_internal(); return std::move(*deriv()); } else { return const_this()->exp_cwise(); } } /// @} /// @{ -------------------------------------------------- LOGI CWISE constexpr void logi_cwise_internal() { for (unsigned int i = 0; i < 
deriv()->num_elements(); ++i) { (*deriv())[i] = std::log((*deriv())[i]); } } [[nodiscard]] constexpr auto logi_cwise() const& { auto res = copy<std::conditional_t < std::is_integral_v < value_type>, value_type, int >> (); res.logi_cwise_internal(); return res; } [[nodiscard]] constexpr auto logi_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && std::is_integral_v < value_type >) { logi_cwise_internal(); return std::move(*deriv()); } { return const_this()->logi_cwise(); } } /// @} /// @{ -------------------------------------------------- MOD CWISE template<typename T> constexpr void mod_cwise_internal(const T& x) { static_assert(std::is_integral_v < value_type > , "mod is for integral values types only"); for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = (*deriv())[i] % x; } } template<typename T> [[nodiscard]] constexpr auto mod_cwise(const T& x) const& { auto res = copy(); res.mod_cwise_internal(x); return res; } template<typename T> [[nodiscard]] constexpr auto mod_cwise(const T& x)&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { mod_cwise_internal(x); return std::move(*deriv()); } { return const_this()->mod_cwise(); } } /// @} /// @{ -------------------------------------------------- POW CWISE constexpr void pow_cwise_internal(double exponent) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::pow((*deriv())[i], exponent); } } [[nodiscard]] constexpr auto pow_cwise(double exponent) const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.pow_cwise_internal(exponent); return res; } [[nodiscard]] constexpr auto pow_cwise(double exponent)&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { pow_cwise_internal(exponent); return std::move(*deriv()); } else { return const_this()->pow_cwise(exponent); } } /// @} /// @{ -------------------------------------------------- POWI CWISE constexpr void powi_cwise_internal(int exponent) { for 
(unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::pow((*deriv())[i], exponent); } } [[nodiscard]] constexpr auto powi_cwise(int exponent) const& { auto res = copy(); res.powi_cwise_internal(exponent); return res; } [[nodiscard]] constexpr auto powi_cwise(int exponent)&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { powi_cwise_internal(exponent); return std::move(*deriv()); } else { return const_this()->powi_cwise(exponent); } } /// @} /// @{ -------------------------------------------------- SIGN CWISE constexpr void sign_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = static_cast<value_type>(!std::signbit((*deriv())[i])); } } [[nodiscard]] constexpr auto sign_cwise() const& { auto res = copy<std::conditional_t < bk::is_signed_v<value_type>, value_type, int>> (); res.sign_cwise_internal(); return res; } [[nodiscard]] constexpr auto sign_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_signed_v<value_type>) { sign_cwise_internal(); return std::move(*deriv()); } else { return const_this()->sign_cwise(); } } /// @} /// @{ -------------------------------------------------- SQR CWISE constexpr void sqr_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = (*deriv())[i] * (*deriv())[i]; } } [[nodiscard]] constexpr auto sqr_cwise() const& { auto res = copy(); res.sqr_cwise_internal(); return res; } [[nodiscard]] constexpr auto sqr_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { sqr_cwise_internal(); return std::move(*deriv()); } else { return const_this()->sqr_cwise(); } } /// @} /// @{ -------------------------------------------------- SQRT CWISE constexpr void sqrt_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::sqrt((*deriv())[i]); } } [[nodiscard]] constexpr auto sqrt_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); 
res.sqrt_cwise_internal(); return res; } [[nodiscard]] constexpr auto sqrt_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { sqrt_cwise_internal(); return std::move(*deriv()); } else { return const_this()->sqrt_cwise(); } } /// @} /// @{ -------------------------------------------------- SIN CWISE constexpr void sin_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::sin((*deriv())[i]); } } [[nodiscard]] constexpr auto sin_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.sin_cwise_internal(); return res; } [[nodiscard]] constexpr auto sin_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { sin_cwise_internal(); return std::move(*deriv()); } else { return const_this()->sin_cwise(); } } /// @} /// @{ -------------------------------------------------- ASIN CWISE constexpr void asin_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::asin((*deriv())[i]); } } [[nodiscard]] constexpr auto asin_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.asin_cwise_internal(); return res; } [[nodiscard]] constexpr auto asin_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { asin_cwise_internal(); return std::move(*deriv()); } else { return const_this()->asin_cwise(); } } /// @} /// @{ -------------------------------------------------- COS CWISE constexpr void cos_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::cos((*deriv())[i]); } } [[nodiscard]] constexpr auto cos_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.cos_cwise_internal(); return res; } [[nodiscard]] constexpr auto cos_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { 
cos_cwise_internal(); return std::move(*deriv()); } else { return const_this()->cos_cwise(); } } /// @} /// @{ -------------------------------------------------- ACOS CWISE constexpr void acos_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::acos((*deriv())[i]); } } [[nodiscard]] constexpr auto acos_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.acos_cwise_internal(); return res; } [[nodiscard]] constexpr auto acos_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { acos_cwise_internal(); return std::move(*deriv()); } else { return const_this()->acos_cwise(); } } /// @} /// @{ -------------------------------------------------- TAN CWISE constexpr void tan_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::tan((*deriv())[i]); } } [[nodiscard]] constexpr auto tan_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.tan_cwise_internal(); return res; } [[nodiscard]] constexpr auto tan_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { tan_cwise_internal(); return std::move(*deriv()); } else { return const_this()->tan_cwise(); } } /// @} /// @{ -------------------------------------------------- ATAN CWISE constexpr void atan_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::atan((*deriv())[i]); } } [[nodiscard]] constexpr auto atan_cwise() const& { auto res = copy<bk::make_floating_point_t<value_type>>(); res.atan_cwise_internal(); return res; } [[nodiscard]] constexpr auto atan_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type> && bk::is_floating_point_v<value_type>) { atan_cwise_internal(); return std::move(*deriv()); } else { return const_this()->atan_cwise(); } } /// @} /// @{ -------------------------------------------------- CEIL CWISE constexpr 
void ceil_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::ceil((*deriv())[i]); } } [[nodiscard]] constexpr auto ceil_cwise() const& { auto res = copy(); res.ceil_cwise_internal(); return res; } [[nodiscard]] constexpr auto ceil_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { ceil_cwise_internal(); return std::move(*deriv()); } else { return const_this()->ceil_cwise(); } } /// @} /// @{ -------------------------------------------------- FLOOR CWISE constexpr void floor_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::floor((*deriv())[i]); } } [[nodiscard]] constexpr auto floor_cwise() const& { auto res = copy(); res.floor_cwise_internal(); return res; } [[nodiscard]] constexpr auto floor_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { floor_cwise_internal(); return std::move(*deriv()); } else { return const_this()->floor_cwise(); } } /// @} /// @{ -------------------------------------------------- ROUND CWISE constexpr void round_cwise_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::round((*deriv())[i]); } } [[nodiscard]] constexpr auto round_cwise() const& { auto res = copy(); res.round_cwise_internal(); return res; } [[nodiscard]] constexpr auto round_cwise()&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { round_cwise_internal(); return std::move(*deriv()); } else { return const_this()->round_cwise(); } } /// @} /// @{ -------------------------------------------------- CLAMP CWISE constexpr void clamp_cwise_internal(value_type xmin, value_type xmax) { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { (*deriv())[i] = std::clamp((*deriv())[i], xmin, xmax); } } [[nodiscard]] constexpr auto clamp_cwise(value_type xmin, value_type xmax) const& { auto res = copy(); res.clamp_cwise_internal(xmin, xmax); return res; } [[nodiscard]] constexpr auto clamp_cwise(value_type 
xmin, value_type xmax)&& { if constexpr (!bk::is_ref_matrix_v<derived_type>) { clamp_cwise_internal(xmin, xmax); return std::move(*deriv()); } else { return const_this()->clamp_cwise(xmin, xmax); } } /// @} //==================================================================================================== //===== OTHER //==================================================================================================== /// @{ -------------------------------------------------- SORT ASCENDING //! sort values in ascending or descending order constexpr void sort_ascending_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { const value_type temp = (*deriv())[i]; int j = i - 1; while (j >= 0 && (*deriv())[j] > temp) { (*deriv())[j + 1] = (*deriv())[j]; --j; } (*deriv())[j + 1] = temp; } } [[nodiscard]] constexpr auto sort_ascending() const& { auto res = copy(); res.sort_ascending_internal(); return res; } [[nodiscard]] constexpr auto sort_ascending()&& { sort_ascending_internal(); return std::move(*deriv()); } /// @} /// @{ -------------------------------------------------- SORT DESCENDING constexpr void sort_descending_internal() { for (unsigned int i = 0; i < deriv()->num_elements(); ++i) { const value_type temp = (*deriv())[i]; int j = i - 1; while (j >= 0 && (*deriv())[j] < temp) { (*deriv())[j + 1] = (*deriv())[j]; --j; } (*deriv())[j + 1] = temp; } } [[nodiscard]] constexpr auto sort_descending() const& { auto res = copy(); res.sort_descending_internal(); return res; } [[nodiscard]] constexpr auto sort_descending()&& { sort_descending_internal(); return std::move(*deriv()); } /// @} }; // class MatrixFunctions } // namespace bk::details #endif //BK_MATRIXFUNCTIONS_H
sp.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// //--------------------------------------------------------------------- // program SP //--------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif #include "header.h" #include "print_results.h" /* common /global/ */ int grid_points[3], nx2, ny2, nz2; logical timeron; /* common /constants/ */ double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3, dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt, ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2, xxcon3, 
xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6, c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16; /* common /fields/ */ double u [KMAX][JMAXP+1][IMAXP+1][5]; double us [KMAX][JMAXP+1][IMAXP+1]; double vs [KMAX][JMAXP+1][IMAXP+1]; double ws [KMAX][JMAXP+1][IMAXP+1]; double qs [KMAX][JMAXP+1][IMAXP+1]; double rho_i [KMAX][JMAXP+1][IMAXP+1]; double speed [KMAX][JMAXP+1][IMAXP+1]; double square [KMAX][JMAXP+1][IMAXP+1]; double rhs [KMAX][JMAXP+1][IMAXP+1][5]; double forcing[KMAX][JMAXP+1][IMAXP+1][5]; /* common /work_1d/ */ double cv [PROBLEM_SIZE]; double rhon[PROBLEM_SIZE]; double rhos[PROBLEM_SIZE]; double rhoq[PROBLEM_SIZE]; double cuf [PROBLEM_SIZE]; double q [PROBLEM_SIZE]; double ue [PROBLEM_SIZE][5]; double buf[PROBLEM_SIZE][5]; #pragma omp threadprivate(cv,rhon,rhos,rhoq,cuf,q,ue,buf) /* common /work_lhs/ */ double lhs [IMAXP+1][IMAXP+1][5]; double lhsp[IMAXP+1][IMAXP+1][5]; double lhsm[IMAXP+1][IMAXP+1][5]; #pragma omp threadprivate(lhs,lhsp,lhsm) int main(int argc, char *argv[]) { int i, niter, step, n3; double mflops, t, tmax, trecs[t_last+1]; logical verified; char Class; char *t_names[t_last+1]; //--------------------------------------------------------------------- // Read input file (if it exists), else take // defaults from parameters //--------------------------------------------------------------------- FILE *fp; if ((fp = fopen("timer.flag", "r")) != NULL) { timeron = true; t_names[t_total] = "total"; t_names[t_rhsx] = "rhsx"; t_names[t_rhsy] = "rhsy"; t_names[t_rhsz] = "rhsz"; t_names[t_rhs] = "rhs"; t_names[t_xsolve] = "xsolve"; t_names[t_ysolve] = "ysolve"; t_names[t_zsolve] = "zsolve"; 
t_names[t_rdis1] = "redist1"; t_names[t_rdis2] = "redist2"; t_names[t_tzetar] = "tzetar"; t_names[t_ninvr] = "ninvr"; t_names[t_pinvr] = "pinvr"; t_names[t_txinvr] = "txinvr"; t_names[t_add] = "add"; fclose(fp); } else { timeron = false; } printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - SP Benchmark\n\n"); if ((fp = fopen("inputsp.data", "r")) != NULL) { int result; printf(" Reading from input file inputsp.data\n"); result = fscanf(fp, "%d", &niter); while (fgetc(fp) != '\n'); result = fscanf(fp, "%lf", &dt); while (fgetc(fp) != '\n'); result = fscanf(fp, "%d%d%d", &grid_points[0], &grid_points[1], &grid_points[2]); fclose(fp); } else { printf(" No input file inputsp.data. Using compiled defaults\n"); niter = NITER_DEFAULT; dt = DT_DEFAULT; grid_points[0] = PROBLEM_SIZE; grid_points[1] = PROBLEM_SIZE; grid_points[2] = PROBLEM_SIZE; } printf(" Size: %4dx%4dx%4d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Iterations: %4d dt: %11.7f\n", niter, dt); printf(" Number of available threads: %5d\n", omp_get_max_threads()); printf("\n"); if ((grid_points[0] > IMAX) || (grid_points[1] > JMAX) || (grid_points[2] > KMAX) ) { printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]); printf(" Problem size too big for compiled array sizes\n"); return 0; } nx2 = grid_points[0] - 2; ny2 = grid_points[1] - 2; nz2 = grid_points[2] - 2; set_constants(); for (i = 1; i <= t_last; i++) { timer_clear(i); } exact_rhs(); initialize(); //--------------------------------------------------------------------- // do one time step to touch all code, and reinitialize //--------------------------------------------------------------------- adi(); initialize(); for (i = 1; i <= t_last; i++) { timer_clear(i); } timer_start(1); #pragma scop for (step = 1; step <= niter; step++) { adi(); } #pragma endscop timer_stop(1); tmax = timer_read(1); verify(niter, &Class, &verified); if (tmax != 0.0) { n3 = grid_points[0]*grid_points[1]*grid_points[2]; t = 
(grid_points[0]+grid_points[1]+grid_points[2])/3.0; mflops = (881.174 * (double)n3 - 4683.91 * (t * t) + 11484.5 * t - 19272.4) * (double)niter / (tmax*1000000.0); } else { mflops = 0.0; } print_results("SP", Class, grid_points[0], grid_points[1], grid_points[2], niter, tmax, mflops, " floating point", verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, "(none)"); //--------------------------------------------------------------------- // More timers //--------------------------------------------------------------------- if (timeron) { for (i = 1; i <= t_last; i++) { trecs[i] = timer_read(i); } if (tmax == 0.0) tmax = 1.0; printf(" SECTION Time (secs)\n"); for (i = 1; i <= t_last; i++) { printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], trecs[i], trecs[i]*100./tmax); if (i == t_rhs) { t = trecs[t_rhsx] + trecs[t_rhsy] + trecs[t_rhsz]; printf(" --> %8s:%9.3f (%6.2f%%)\n", "sub-rhs", t, t*100./tmax); t = trecs[t_rhs] - t; printf(" --> %8s:%9.3f (%6.2f%%)\n", "rest-rhs", t, t*100./tmax); } else if (i == t_zsolve) { t = trecs[t_zsolve] - trecs[t_rdis1] - trecs[t_rdis2]; printf(" --> %8s:%9.3f (%6.2f%%)\n", "sub-zsol", t, t*100./tmax); } else if (i == t_rdis2) { t = trecs[t_rdis1] + trecs[t_rdis2]; printf(" --> %8s:%9.3f (%6.2f%%)\n", "redist", t, t*100./tmax); } } } return 0; }
graph.h
#ifndef __GRAPH_H #define __GRAPH_H #include <algorithm> #include <fstream> #include <iostream> #include <omp.h> #include <parallel/algorithm> #include <random> #include <string> #include <tuple> #include <utility> #include "defs.h" #include "parallel_array.h" #include "utils.h" struct Edge { u32 from; u32 to; u32 weight; Edge() {} Edge(u32 from, u32 to, u32 weight) : from(from), to(to), weight(weight) {} }; bool operator<(const Edge& a, const Edge& b) { return std::tie(a.from, a.to, a.weight) < std::tie(b.from, b.to, b.weight); } struct Graph { ParallelArray<u32> nodes; ParallelArray<Edge> edges; Graph(u32 num_nodes, u32 num_edges) : nodes(num_nodes), edges(num_edges) {} u32 num_nodes() const { return nodes.size(); } u32 num_edges() const { return edges.size(); } void sort_edges() { __gnu_parallel::sort(edges.begin(), edges.end()); } }; /** * Loads a graph from given path * Graph format is: * 1: NUM_NODES NUM_EDGES * 2: FROM TO WEIGHT * 3: FROM TO WEIGHT * ... * * You only need to write each edge once **/ Graph load_graph(std::string filename) { std::cout << "Loading graph from path " << filename << "\n"; std::ifstream in(filename); u32 num_nodes; u32 num_edges; in >> num_nodes >> num_edges; std::cout << num_nodes << " nodes and " << num_edges << " edges\n"; Graph G(num_nodes, num_edges * 2); #pragma omp parallel for for (u32 i = 0; i < num_nodes; ++i) { G.nodes[i] = i; } for (u32 i = 0; i < num_edges; ++i) { u32 from, to, weight; in >> from >> to >> weight; G.edges[2 * i] = Edge(from, to, weight); G.edges[2 * i + 1] = Edge(to, from, weight); } G.sort_edges(); std::cout << "Graph loaded\n"; return G; } /** * Creates a random connected graph with n nodes and m edges * m >= n - 1 */ Graph generate_graph(u32 n, u32 m) { Graph G(n, 2 * m); u32 cnt = 0; for (u32 i = 0; i < n; ++i) { G.nodes[i] = i; } for (u32 i = 1; i <= n - 1; ++i) { u32 weight = gen(); u32 v = randint(0, i - 1); G.edges[cnt++] = Edge(i, v, weight); G.edges[cnt++] = Edge(v, i, weight); } for (u32 i = 
1; i <= m - n + 1; ++i) { u32 weight = gen(); u32 u = randint(0, n - 1); u32 v = randint(0, n - 2); if (v >= u) ++v; G.edges[cnt++] = Edge(u, v, weight); G.edges[cnt++] = Edge(v, u, weight); } G.sort_edges(); return G; } void dfs(u32 u, std::vector<std::vector<u32>>& g, std::vector<u32>& used) { used[u] = 1; for (auto v : g[u]) { if (used[v] == 0) dfs(v, g, used); } } bool is_connected(Graph G) { std::vector<std::vector<u32>> g(G.num_edges()); std::vector<u32> used(G.num_nodes()); for (auto e : G.edges) { g[e.from].push_back(e.to); g[e.to].push_back(e.from); } u32 num_comp = 0; for (u32 i = 0; i < G.num_nodes(); ++i) { if (!used[i]) { dfs(i, g, used); ++num_comp; } } return num_comp == 1; } #endif
csr.c
/*! \file csr.c \brief Functions for dealing with csr structures \author David C. Anastasiu */ #include "includes.h" #define DA_OMPMINOPS 50000 //forward declaration void da_csr_WriteClean(int32_t **bptr, int32_t **bind, float **bval, char * filename, FILE *fpout); /*************************************************************************/ /*! Allocate memory for a CSR matrix and initializes it \returns the allocated matrix. The various fields are set to NULL. */ /**************************************************************************/ da_csr_t *da_csr_Create() { da_csr_t *mat; mat = (da_csr_t *)gk_malloc(sizeof(da_csr_t), "da_csr_Create: mat"); da_csr_Init(mat); return mat; } /*************************************************************************/ /*! Initializes the matrix \param mat is the matrix to be initialized. */ /*************************************************************************/ void da_csr_Init(da_csr_t *mat) { memset(mat, 0, sizeof(da_csr_t)); mat->nrows = mat->ncols = -1; } /*************************************************************************/ /*! Frees all the memory allocated for matrix. \param mat is the matrix to be freed. */ /*************************************************************************/ void da_csr_Free(da_csr_t **mat) { if (*mat == NULL) return; da_csr_FreeContents(*mat); gk_free((void **)mat, LTERM); } /*************************************************************************/ /*! Frees a variable sized list of matrix pointers. Last item in the list must be LTERM. \param ptr1 is the first matrix to be freed, \param ... are additional matrices to be freed. */ /*************************************************************************/ void da_csr_FreeAll(da_csr_t **ptr1,...) 
{ va_list plist; void **ptr; if (*ptr1 != NULL) da_csr_Free(ptr1); va_start(plist, ptr1); while ((ptr = va_arg(plist, void **)) != LTERM) { if (*ptr != NULL) { da_csr_Free((da_csr_t **)ptr); } } va_end(plist); } /*************************************************************************/ /*! Frees the memory allocated for the matrix's fields for the given type and sets them to NULL. \param mat is the matrix whose partial contents will be freed, \param type is the internal representation to be freed: row (DA_ROW), or column (DA_COL). */ /*************************************************************************/ void da_csr_FreeBase(da_csr_t *mat, char type) { if(type & DA_ROW) //drop the row index gk_free((void **)&mat->rowptr, &mat->rowind, &mat->rowval, LTERM); if(type & DA_COL) //drop the column index gk_free((void **)&mat->colptr, &mat->colind, &mat->colval, LTERM); } /*************************************************************************/ /*! Create missing indexes for the matrix such that both indexes exist. \param mat is the matrix whose bases need loading. */ /*************************************************************************/ void da_csr_LoadBases(da_csr_t *csr) { if(csr->rowptr && csr->colptr) return; if(!csr->rowptr && !csr->colptr) return; if(csr->rowptr){ da_csr_CreateIndex(csr, DA_COL); // sort column indices for input matrices da_csr_SortIndices(csr, DA_COL); } else { da_csr_CreateIndex(csr, DA_ROW); // sort column indices for input matrices da_csr_SortIndices(csr, DA_ROW); } } /*************************************************************************/ /*! Frees only the memory allocated for the matrix's different fields and sets them to NULL. \param mat is the matrix whose contents will be freed. 
*/ /*************************************************************************/ void da_csr_FreeContents(da_csr_t *mat) { gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids, &mat->colptr, &mat->colind, &mat->colval, &mat->colids, &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums, &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols, &mat->rwgts, &mat->cwgts, LTERM); } /*************************************************************************/ /*! Transfers the memory within from to the to matrix. \params from is the matrix from which we transfer memory. Its pointers will be set to NULL. \param to is the matrix whose contents will be freed that will receive memory from the from matrix. */ /*************************************************************************/ void da_csr_Transfer(da_csr_t *from, da_csr_t *to) { da_csr_FreeContents(to); to->rowptr = from->rowptr; to->rowind = from->rowind; to->rowval = from->rowval; to->rowids = from->rowids; to->colptr = from->colptr; to->colind = from->colind; to->colval = from->colval; to->colids = from->colids; to->rnorms = from->rnorms; to->cnorms = from->cnorms; to->rsums = from->rsums; to->csums = from->csums; to->rsizes = from->rsizes; to->csizes = from->csizes; to->rvols = from->rvols; to->cvols = from->cvols; to->rwgts = from->rwgts; to->cwgts = from->cwgts; to->nrows = from->nrows; to->ncols = from->ncols; from->rowptr = NULL; from->rowind = NULL; from->rowval = NULL; from->rowids = NULL; from->colptr = NULL; from->colind = NULL; from->colval = NULL; from->colids = NULL; from->rnorms = NULL; from->cnorms = NULL; from->rsums = NULL; from->csums = NULL; from->rsizes = NULL; from->csizes = NULL; from->rvols = NULL; from->cvols = NULL; from->rwgts = NULL; from->cwgts = NULL; } /*************************************************************************/ /*! Returns a copy of a matrix. \param mat is the matrix to be duplicated. \returns the newly created copy of the matrix. 
*/
/**************************************************************************/
da_csr_t *da_csr_Copy(da_csr_t *mat)
{
    da_csr_t *nmat;

    nmat = da_csr_Create();

    nmat->nrows = mat->nrows;
    nmat->ncols = mat->ncols;

    /* copy the row structure; each field is duplicated only if present.
       NOTE: only rowids/rnorms (and their column counterparts) are copied
       in addition to the index arrays — rsums/csums, rsizes/csizes,
       rvols/cvols and rwgts/cwgts are NOT duplicated by this function. */
    if (mat->rowptr)
        nmat->rowptr = da_pcopy(mat->nrows+1, mat->rowptr,
            da_pmalloc(mat->nrows+1, "da_csr_Dup: rowptr"));
    if (mat->rowids)
        nmat->rowids = da_icopy(mat->nrows, mat->rowids,
            da_imalloc(mat->nrows, "da_csr_Dup: rowids"));
    if (mat->rnorms)
        nmat->rnorms = da_vcopy(mat->nrows, mat->rnorms,
            da_vmalloc(mat->nrows, "da_csr_Dup: rnorms"));
    /* rowind/rowval copies read mat->rowptr[nrows]; a matrix with rowind
       but no rowptr would crash here — assumed consistent by contract */
    if (mat->rowind)
        nmat->rowind = da_icopy(mat->rowptr[mat->nrows], mat->rowind,
            da_imalloc(mat->rowptr[mat->nrows], "da_csr_Dup: rowind"));
    if (mat->rowval)
        nmat->rowval = da_vcopy(mat->rowptr[mat->nrows], mat->rowval,
            da_vmalloc(mat->rowptr[mat->nrows], "da_csr_Dup: rowval"));

    /* copy the col structure */
    if (mat->colptr)
        nmat->colptr = da_pcopy(mat->ncols+1, mat->colptr,
            da_pmalloc(mat->ncols+1, "da_csr_Dup: colptr"));
    if (mat->colids)
        nmat->colids = da_icopy(mat->ncols, mat->colids,
            da_imalloc(mat->ncols, "da_csr_Dup: colids"));
    if (mat->cnorms)
        nmat->cnorms = da_vcopy(mat->ncols, mat->cnorms,
            da_vmalloc(mat->ncols, "da_csr_Dup: cnorms"));
    if (mat->colind)
        nmat->colind = da_icopy(mat->colptr[mat->ncols], mat->colind,
            da_imalloc(mat->colptr[mat->ncols], "da_csr_Dup: colind"));
    if (mat->colval)
        nmat->colval = da_vcopy(mat->colptr[mat->ncols], mat->colval,
            da_vmalloc(mat->colptr[mat->ncols], "da_csr_Dup: colval"));

    return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
da_csr_t *da_csr_ExtractSubmatrix(da_csr_t *mat, idx_t rstart, idx_t nrows)
{
    ssize_t i;
    da_csr_t *nmat;

    /* reject ranges that run past the end of the matrix */
    if (rstart+nrows > mat->nrows)
        return NULL;

    nmat = da_csr_Create();

    nmat->nrows  = nrows;
    nmat->ncols  = mat->ncols;

    /* copy the row structure */
    if (mat->rowptr) {
        nmat->rowptr = da_pcopy(nrows+1, mat->rowptr+rstart,
            da_pmalloc(nrows+1, "da_csr_ExtractSubmatrix: rowptr"));
        /* FIX: the re-basing loop and its sanity checks previously ran
           unconditionally and dereferenced a NULL nmat->rowptr whenever
           mat->rowptr was absent; they now run only when a row pointer
           was actually copied.  The loop runs high-to-low so rowptr[0]
           keeps its original value until the final iteration. */
        for (i=nrows; i>=0; i--)
            nmat->rowptr[i] -= nmat->rowptr[0];
        ASSERT(nmat->rowptr[0] == 0);
        ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
    }

    if (mat->rowids)
        nmat->rowids = da_icopy(nrows, mat->rowids+rstart,
            da_imalloc(nrows, "da_csr_ExtractSubmatrix: rowids"));
    if (mat->rnorms)
        nmat->rnorms = da_vcopy(nrows, mat->rnorms+rstart,
            da_vmalloc(nrows, "da_csr_ExtractSubmatrix: rnorms"));
    if (mat->rsums)
        nmat->rsums = da_vcopy(nrows, mat->rsums+rstart,
            da_vmalloc(nrows, "da_csr_ExtractSubmatrix: rsums"));

    /* copy only the non-zeros belonging to the selected row window */
    if (mat->rowind)
        nmat->rowind = da_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
            mat->rowind+mat->rowptr[rstart],
            da_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                "da_csr_ExtractSubmatrix: rowind"));
    if (mat->rowval)
        nmat->rowval = da_vcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
            mat->rowval+mat->rowptr[rstart],
            da_vmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                "da_csr_ExtractSubmatrix: rowval"));

    return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
    \param mat is the original matrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
da_csr_t *da_csr_ExtractRows(da_csr_t *mat, idx_t nrows, idx_t *rind)
{
    ssize_t r, k, row, len, nnz;
    da_csr_t *nmat;

    nmat = da_csr_Create();

    nmat->nrows = nrows;
    nmat->ncols = mat->ncols;

    /* first pass: total number of non-zeros in the selected rows */
    nnz = 0;
    for (r=0; r<nrows; r++)
        nnz += mat->rowptr[rind[r]+1]-mat->rowptr[rind[r]];

    /* allocation-tag strings intentionally match the originals */
    nmat->rowptr = da_pmalloc(nmat->nrows+1, "da_csr_ExtractPartition: rowptr");
    nmat->rowind = da_imalloc(nnz, "da_csr_ExtractPartition: rowind");
    nmat->rowval = da_vmalloc(nnz, "da_csr_ExtractPartition: rowval");

    /* second pass: gather each requested row's indices and values */
    nmat->rowptr[0] = 0;
    nnz = 0;
    for (k=0; k<nrows; k++) {
        row = rind[k];
        len = mat->rowptr[row+1]-mat->rowptr[row];
        da_icopy(len, mat->rowind+mat->rowptr[row], nmat->rowind+nnz);
        da_vcopy(len, mat->rowval+mat->rowptr[row], nmat->rowval+nnz);
        nnz += len;
        nmat->rowptr[k+1] = nnz;
    }
    ASSERT(k == nmat->nrows);

    return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
    \param mat is the original matrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
void da_csr_ExtractRowsInto(da_csr_t *mat, da_csr_t *nmat, idx_t nrows, idx_t *rind)
{
    ssize_t i, ii, j, nnz;

    /* count non-zeros in the requested rows to validate capacity */
    for (nnz=0, i=0; i<nrows; i++)
        nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];
    /* NOTE(review): capacity check reads nmat->nrows BEFORE it is
       overwritten below — the statement order here is load-bearing.
       Also note the missing ';' after this ASSERT; presumably the ASSERT
       macro's expansion tolerates it — confirm against its definition. */
    ASSERT(nmat->rowptr[nmat->nrows] >= nnz)

    nmat->ncols = mat->ncols;
    nmat->nrows = nrows;
    /* gather the selected rows into nmat's pre-allocated arrays */
    nmat->rowptr[0] = 0;
    for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
        i = rind[ii];
        da_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
        da_vcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
        nnz += mat->rowptr[i+1]-mat->rowptr[i];
        nmat->rowptr[++j] = nnz;
    }
    ASSERT(j == nmat->nrows);
}


/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
    \param mat is the original matrix.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
da_csr_t *da_csr_ExtractPartition(da_csr_t *mat, idx_t *part, idx_t pid)
{
    ssize_t i, j, nnz;
    da_csr_t *nmat;

    nmat = da_csr_Create();

    /* first pass: count rows in the partition and their non-zeros */
    nmat->nrows = 0;
    nmat->ncols = mat->ncols;

    for (nnz=0, i=0; i<mat->nrows; i++) {
        if (part[i] == pid) {
            nmat->nrows++;
            nnz += mat->rowptr[i+1]-mat->rowptr[i];
        }
    }

    nmat->rowptr = da_pmalloc(nmat->nrows+1, "da_csr_ExtractPartition: rowptr");
    nmat->rowind = da_imalloc(nnz, "da_csr_ExtractPartition: rowind");
    nmat->rowval = da_vmalloc(nnz, "da_csr_ExtractPartition: rowval");

    /* second pass: gather the matching rows in their original order */
    nmat->rowptr[0] = 0;
    for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
        if (part[i] == pid) {
            da_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
            da_vcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
            nnz += mat->rowptr[i+1]-mat->rowptr[i];
            nmat->rowptr[++j] = nnz;
        }
    }
    ASSERT(j == nmat->nrows);

    return nmat;
}


/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros
           in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningfull results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
da_csr_t **da_csr_Split(da_csr_t *mat, idx_t *color)
{
    ssize_t i, j;
    idx_t nrows, ncolors;
    ptr_t *rowptr;
    idx_t *rowind;
    val_t *rowval;
    da_csr_t **smats;

    nrows  = mat->nrows;
    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;

    /* colors are assumed consecutive from 0; the largest color in the
       nnz-long color array determines how many submatrices to create */
    ncolors = da_imax(rowptr[nrows], color)+1;

    smats = (da_csr_t **)gk_malloc(sizeof(da_csr_t *)*ncolors, "da_csr_Split: smats");
    /* every submatrix keeps the full row count; rows with no non-zeros of
       a given color simply come out empty in that submatrix */
    for (i=0; i<ncolors; i++) {
        smats[i] = da_csr_Create();
        smats[i]->nrows  = mat->nrows;
        smats[i]->ncols  = mat->ncols;
        smats[i]->rowptr = da_psmalloc(nrows+1, 0, "da_csr_Split: smats[i]->rowptr");
    }

    /* first pass: per-color row counts */
    for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
            smats[color[j]]->rowptr[i]++;
    }
    /* turn the counts into proper CSR offsets */
    for (i=0; i<ncolors; i++)
        MAKECSR(j, nrows, smats[i]->rowptr);

    for (i=0; i<ncolors; i++) {
        smats[i]->rowind = da_imalloc(smats[i]->rowptr[nrows], "da_csr_Split: smats[i]->rowind");
        smats[i]->rowval = da_vmalloc(smats[i]->rowptr[nrows], "da_csr_Split: smats[i]->rowval");
    }

    /* second pass: scatter entries, using rowptr[i] as a write cursor... */
    for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
            smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
            smats[color[j]]->rowptr[i]++;
        }
    }
    /* ...then shift the advanced cursors back into valid offsets */
    for (i=0; i<ncolors; i++)
        SHIFTCSR(j, nrows, smats[i]->rowptr);

    return smats;
}


/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either DA_FMT_METIS, DA_FMT_CLUTO, DA_FMT_CSR,
           DA_FMT_BINROW, DA_FMT_BINCOL specifying the type of the input
           format. The DA_FMT_CSR does not contain a header line, whereas
           the DA_FMT_BINROW is a binary format written by da_csr_Write()
           using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when DA_FMT_CSR is used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decremented during input so that they
           will start from 0. It only applies when DA_FMT_CSR is used.
    \returns the matrix that was read.
*/
/**************************************************************************/
da_csr_t *da_csr_Read(char *filename, char format, char readvals, char numbering)
{
    ssize_t i, j, k, l;
    /* NOTE(review): 'read', 'size' and 'nz' below are never used at this
       scope; 'size' is additionally shadowed by a local 'long size' in the
       BINAP/BINAPB branches. */
    size_t nrows, ncols, nnz, nfields, fmt, ncon, lnlen, read, size;
    ptr_t *rowptr;
    idx_t *rowind;
    int32_t *iinds, *jinds, *bptr = NULL, *bind = NULL, rid, len, nnz2, nr, nc, nz, rv;
    val_t *rowval = NULL, *vals, fval;
    float *bval = NULL;
    char readsizes, readwgts;
    char *line = NULL, *head, *tail, fmtstr[256];
    FILE *fpin;
    da_csr_t *mat = NULL;

    if (!gk_fexists(filename))
        gk_errexit(SIGERR, "File %s does not exist!\n", filename);

    /* ---- DA_FMT_BINROW: binary header (nrows, ncols) + rowptr + rowind
       [+ rowval]; the b* staging buffers bridge on-disk int32/float to the
       configured ptr_t/idx_t/val_t widths ---- */
    if (format == DA_FMT_BINROW) {
        mat = da_csr_Create();

        fpin = gk_fopen(filename, "rb", "da_csr_Read: fpin");
        if (fread(&(nr), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
        if (fread(&(nc), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
        mat->nrows = nr;
        mat->ncols = nc;

        mat->rowptr = da_pmalloc(nr+1, "da_csr_Read: rowptr");
        if(sizeof(ptr_t) != sizeof(int32_t)){
            bptr = da_i32malloc(nr+1, "da_csr_Read: bptr");
            if (fread(bptr, sizeof(int32_t), nr+1, fpin) != nr+1)
                gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
            for(i=0; i < nr+1; i++)
                mat->rowptr[i] = bptr[i];
            gk_free((void**)&bptr, LTERM);
        } else if (fread(mat->rowptr, sizeof(ptr_t), nr+1, fpin) != nr+1)
            gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);

        nnz = mat->rowptr[mat->nrows];
        mat->rowind = da_imalloc(nnz, "da_csr_Read: rowind");
        if(sizeof(idx_t) != sizeof(int32_t)){
            bind = da_i32malloc(nnz, "da_csr_Read: bind");
            if (fread(bind, sizeof(int32_t), nnz, fpin) != nnz)
                gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
            for(i=0; i < nnz; i++)
                mat->rowind[i] = bind[i];
            gk_free((void**)&bind, LTERM);
        } else if (fread(mat->rowind, sizeof(idx_t), nnz, fpin) != nnz)
            gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);

        if (readvals == 1) {
            mat->rowval = da_vmalloc(nnz, "da_csr_Read: rowval");
            if(sizeof(val_t) != sizeof(float)){
                bval = da_fmalloc(nnz, "da_csr_Read: bval");
                /* BUG(review): reads the float values into 'bind' (already
                   freed above) instead of 'bval'; rowval is then filled
                   from the never-written bval buffer.  Should be
                   fread(bval, ...).  Error message is also copy-pasted
                   from the rowind read. */
                if (fread(bind, sizeof(float), nnz, fpin) != nnz)
                    gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
                for(i=0; i < nnz; i++)
                    mat->rowval[i] = bval[i];
                gk_free((void**)&bval, LTERM);
            } else if (fread(mat->rowval, sizeof(val_t), nnz, fpin) != nnz)
                gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
        }
        gk_fclose(fpin);
        return mat;
    }

    /* ---- DA_FMT_BINAP: per-row records (len, ind[len], val[len]) with a
       leading nrows; nnz is estimated from the file size ---- */
    if (format == DA_FMT_BINAP) {
        mat = da_csr_Create();
        fpin = gk_fopen(filename, "rb", "da_csr_Read: fpin");
        if(fread(&nr, sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Could not read the number of rows...\n");
        mat->nrows = nr;
        struct stat st;
        stat(filename, &st);
        long size = st.st_size;
        /* upper bound on nnz from the file size after the header */
        nnz = (size - sizeof(int32_t) * mat->nrows - 1) / (sizeof(int32_t) + sizeof(float));
        mat->rowptr = da_pmalloc(nr+1, "da_csr_Read: rowptr");
        if(sizeof(ptr_t) != sizeof(int32_t))
            /* NOTE(review): bptr is allocated here but never used in this
               branch, and only freed at the end of this branch */
            bptr = da_i32malloc(nr+1, "da_csr_Read: bptr");
        mat->rowind = da_imalloc(nnz, "da_csr_Read: rowind");
        if(sizeof(idx_t) != sizeof(int32_t))
            bind = da_i32malloc(nnz, "da_csr_Read: bind");
        if (readvals == 1){
            mat->rowval = da_vmalloc(nnz, "da_csr_Read: rowval");
            if(sizeof(val_t) != sizeof(float))
                bval = da_fmalloc(nnz, "da_csr_Read: bval");
        }
        mat->rowptr[0] = 0;
        for ( i=0; i < nr; i++ ) {
            if(fread(&len, sizeof(int32_t), 1, fpin) != 1)
                gk_errexit(SIGERR, "Failed to read the number of elements in row %d "
                        "from file %s!\n", i+1, filename);
            if(sizeof(idx_t) != sizeof(int32_t)){
                if(fread( bind, sizeof(int32_t), len, fpin) != len)
                    gk_errexit(SIGERR, "Failed to read the indicator elements in row %d "
                            "from file %s!\n", i+1, filename);
                for(j=0; j < len; j++)
                    mat->rowind[mat->rowptr[i]+j] = bind[j];
            } else if(fread( mat->rowind+mat->rowptr[i], sizeof(idx_t), len, fpin) != len)
                gk_errexit(SIGERR, "Failed to read the indicator elements in row %d "
                        "from file %s!\n", i+1, filename);
            /* NOTE(review): values are read unconditionally here, even when
               readvals == 0 (mat->rowval/bval would then be NULL) */
            if(sizeof(val_t) != sizeof(float)){
                if(fread( bval, sizeof(float), len, fpin) != len)
                    gk_errexit(SIGERR, "Failed to read the value elements in row %d "
                            "from file %s!\n", i+1, filename);
                for(j=0; j < len; j++)
                    mat->rowval[mat->rowptr[i]+j] = bval[j];
            } else if(fread( mat->rowval+mat->rowptr[i], sizeof(val_t), len, fpin) != len)
                gk_errexit(SIGERR, "Failed to read the value elements in row %d "
                        "from file %s!\n", i+1, filename);
            mat->rowptr[i+1] = mat->rowptr[i] + len;
        }
        /* shrink to the true nnz if the size-based estimate was too large */
        if ( mat->rowptr[mat->nrows] != nnz ) {
            nnz = mat->rowptr[mat->nrows];
            mat->rowind = da_irealloc(mat->rowind, nnz, "da_csr_Read: rowind resize");
            mat->rowval = da_vrealloc(mat->rowval, nnz, "da_csr_Read: rowval resize");
        }
        /* indices on disk are 1-based: find max (== ncols) and shift to 0-based */
        for ( i=0, ncols=0; i<nnz; i++ ){
            if(mat->rowind[i] > ncols)
                ncols = mat->rowind[i];
            mat->rowind[i]--;
        }
        mat->ncols = ncols;
        da_csr_SortIndices(mat, DA_ROW);
        gk_fclose(fpin);
        gk_free((void**)&bptr, &bind, &bval, LTERM);
        return mat;
    }

    /* ---- DA_FMT_BINAPB: per-row records (rid, len, ind[len]); no values,
       row count discovered by reading until EOF ---- */
    if (format == DA_FMT_BINAPB) {
        mat = da_csr_Create();
        fpin = gk_fopen(filename, "rb", "da_csr_Read: fpin");
        struct stat st;
        stat(filename, &st);
        long size = st.st_size;
        nnz = size/(sizeof(int32_t));
        // Going to make the pessimistic assumption that there are as
        // many records as there are nnz
        mat->rowptr = da_pmalloc(nnz+1, "da_csr_Read: rowptr");
        mat->rowind = da_imalloc(nnz, "da_csr_Read: rowind");
        mat->nrows = 0;
        mat->rowptr[0] = 0;
        mat->rowval = NULL;
        nnz2 = 0;
        while ( fread(&rid, sizeof(int32_t), 1, fpin) == 1 ) {
            if(fread(&len, sizeof(int32_t), 1, fpin) != 1)
                gk_errexit(SIGERR, "Could not read row length in file %s!\n", filename);
            if(sizeof(idx_t) != sizeof(int32_t)){
                /* BUG(review): 'bind' is never allocated in this branch
                   (still NULL here), so this fread writes through a NULL
                   pointer when sizeof(idx_t) != sizeof(int32_t) */
                if(fread( bind, sizeof(int32_t), len, fpin) != len)
                    gk_errexit(SIGERR, "Failed to read the indicator elements in row %d "
                            "from file %s!\n", mat->nrows+1, filename);
                for(j=0; j < len; j++)
                    mat->rowind[mat->rowptr[mat->nrows]+j] = bind[j];
            } else if(fread( mat->rowind+mat->rowptr[mat->nrows], sizeof(idx_t), len, fpin) != len)
                gk_errexit(SIGERR, "Failed to read the indicator elements in row %d "
                        "from file %s!\n", mat->nrows+1, filename);
            nnz2 += len;
            ASSERT(nnz2 <= nnz);
            mat->rowptr[mat->nrows+1] = mat->rowptr[mat->nrows] + len;
            mat->nrows++;
            ASSERT(mat->nrows <= nnz);
        }
        mat->rowptr = da_prealloc(mat->rowptr, (mat->nrows+1), "da_csr_Read: rowptr resize");
        if ( mat->rowptr[mat->nrows] != nnz ) {
            nnz = mat->rowptr[mat->nrows];
            mat->rowind = da_irealloc(mat->rowind, nnz, "da_csr_Read: rowind resize");
        }
        /* 1-based on disk: derive ncols and shift indices to 0-based */
        for ( i=0, ncols=0; i<nnz; i++ ){
            if(mat->rowind[i] > ncols)
                ncols = mat->rowind[i];
            mat->rowind[i]--;
        }
        mat->ncols = ncols;
        da_csr_SortIndices(mat, DA_ROW);
        gk_fclose(fpin);
        gk_free((void**) &bind, LTERM);
        return mat;
    }

    /* ---- DA_FMT_BINCOL: same layout as BINROW but column-wise ---- */
    if (format == DA_FMT_BINCOL) {
        mat = da_csr_Create();

        fpin = gk_fopen(filename, "rb", "da_csr_Read: fpin");
        if (fread(&(nr), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
        if (fread(&(nc), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
        mat->nrows = nr;
        mat->ncols = nc;

        mat->colptr = da_pmalloc(nc+1, "da_csr_Read: colptr");
        if(sizeof(ptr_t) != sizeof(int32_t)){
            bptr = da_i32malloc(nc+1, "da_csr_Read: bptr");
            /* BUG(review): compares against nr+1 although nc+1 items were
               requested; should be != nc+1 */
            if (fread(bptr, sizeof(int32_t), nc+1, fpin) != nr+1)
                gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
            for(i=0; i < nc+1; i++)
                mat->colptr[i] = bptr[i];
            gk_free((void**)&bptr, LTERM);
        } else if (fread(mat->colptr, sizeof(ptr_t), nc+1, fpin) != nc+1)
            gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);

        nnz = mat->colptr[mat->ncols];
        mat->colind = da_imalloc(nnz, "da_csr_Read: colind");
        if(sizeof(idx_t) != sizeof(int32_t)){
            bind = da_i32malloc(nnz, "da_csr_Read: bind");
            if (fread(bind, sizeof(int32_t), nnz, fpin) != nnz)
                gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
            for(i=0; i < nnz; i++)
                mat->colind[i] = bind[i];
            gk_free((void**)&bind, LTERM);
        } else if (fread(mat->colind, sizeof(idx_t), nnz, fpin) != nnz)
            gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);

        if (readvals == 1) {
            mat->colval = da_vmalloc(nnz, "da_csr_Read: colval");
            if(sizeof(val_t) != sizeof(float)){
                bval = da_fmalloc(nnz, "da_csr_Read: bval");
                /* BUG(review): same copy-paste defect as BINROW — reads
                   values into 'bind' (freed above) instead of 'bval' */
                if (fread(bind, sizeof(float), nnz, fpin) != nnz)
                    gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
                for(i=0; i < nnz; i++)
                    mat->colval[i] = bval[i];
                gk_free((void**)&bval, LTERM);
            } else if (fread(mat->colval, sizeof(val_t), nnz, fpin) != nnz)
                gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
        }
        gk_fclose(fpin);
        return mat;
    }

    /* ---- DA_FMT_BIJV: binary (i, j[, v]) triplets with a header; the
       triplets are converted to CSR further below ---- */
    if (format == DA_FMT_BIJV) {
        mat = da_csr_Create();

        fpin = gk_fopen(filename, "rb", "da_csr_Read: fpin");
        if (fread(&(nr), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
        if (fread(&(nc), sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
        if (fread(&nnz, sizeof(size_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the nnz from file %s!\n", filename);
        if (fread(&rv, sizeof(int32_t), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read the readvals from file %s!\n", filename);
        if(readvals != rv)
            gk_errexit(SIGERR, "Readvals requested but file %s does not contain values!\n", filename);

        iinds = da_i32malloc(nnz, "iinds");
        jinds = da_i32malloc(nnz, "jinds");
        vals = (readvals ? da_vmalloc(nnz, "vals") : NULL);
        nrows = nr;
        ncols = nc;

        for (i=0; i<nnz; i++) {
            if (fread(&(iinds[i]), sizeof(int32_t), 1, fpin) != 1)
                gk_errexit(SIGERR, "Failed to read iinds[i] from file %s!\n", filename);
            if (fread(&(jinds[i]), sizeof(int32_t), 1, fpin) != 1)
                gk_errexit(SIGERR, "Failed to read jinds[i] from file %s!\n", filename);
            if (readvals) {
                /* NOTE(review): reads sizeof(float) bytes into vals[i],
                   which is val_t — only correct when val_t is float */
                if (fread(&(vals[i]), sizeof(float), 1, fpin) != 1)
                    gk_errexit(SIGERR, "Failed to read vals[i] from file %s!\n", filename);
            }
        }
        gk_fclose(fpin);
    }

    /* ---- DA_FMT_IJV: textual (i, j[, v]) triplets; converted to CSR in
       the shared block below ---- */
    if (format == DA_FMT_IJV) {
        gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

        if (readvals == 1 && 3 * nrows != nnz)
            gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the file %s is not a multiple of 3.\n", nnz, readvals, filename);
        if (readvals == 0 && 2 * nrows != nnz)
            gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the file %s is not a multiple of 2.\n", nnz, readvals, filename);

        mat = da_csr_Create();
        nnz = nrows;     /* nrows from getfilestats is the line count here */
        numbering = (numbering ? -1 : 0);
        iinds = da_i32malloc(nnz, "iinds");
        jinds = da_i32malloc(nnz, "jinds");
        vals = (readvals ? da_vmalloc(nnz, "vals") : NULL);

        fpin = gk_fopen(filename, "r", "da_csr_Read: fpin");
        /* dimensions are inferred as max index seen + 1 */
        for (nrows = 0, ncols = 0, i = 0; i < nnz; i++) {
            if (readvals) {
                if (fscanf(fpin, "%d %d %f", &iinds[i], &jinds[i], &vals[i]) != 3)
                    gk_errexit(SIGERR, "Error: Failed to read (i, j, val) for nnz: %zd.\n", i);
            } else {
                if (fscanf(fpin, "%d %d", &iinds[i], &jinds[i]) != 2)
                    gk_errexit(SIGERR, "Error: Failed to read (i, j) value for nnz: %zd.\n", i);
            }
            iinds[i] += numbering;
            jinds[i] += numbering;
            if (nrows < iinds[i])
                nrows = iinds[i];
            if (ncols < jinds[i])
                ncols = jinds[i];
        }
        nrows++;
        ncols++;
        gk_fclose(fpin);
    }

    if (format == DA_FMT_IJV || format == DA_FMT_BIJV) {
        /* convert (i, j, v) into a CSR matrix */
        mat->nrows = nrows;
        mat->ncols = ncols;
        rowptr = mat->rowptr = da_psmalloc(mat->nrows + 1, 0, "rowptr");
        rowind = mat->rowind = da_imalloc(nnz, "rowind");
        if (readvals)
            rowval = mat->rowval = da_vmalloc(nnz, "rowval");

        /* classic counting-sort build: count, prefix-sum, scatter, shift */
        for (i = 0; i < nnz; i++)
            rowptr[iinds[i]]++;
        MAKECSR(i, mat->nrows, rowptr);

        for (i = 0; i < nnz; i++) {
            rowind[rowptr[iinds[i]]] = jinds[i];
            if (readvals)
                rowval[rowptr[iinds[i]]] = vals[i];
            rowptr[iinds[i]]++;
        }
        SHIFTCSR(i, mat->nrows, rowptr);

        gk_free((void **) &iinds, &jinds, &vals, LTERM);

        return mat;
    }

    /* ---- text formats: parse the header (if any) to decide what the row
       lines contain, then fall through to the shared row parser ---- */
    if (format == DA_FMT_CLUTO) {
        fpin = gk_fopen(filename, "r", "da_csr_Read: fpin");
        do {
            if (gk_getline(&line, &lnlen, fpin) <= 0)
                gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
        } while (line[0] == '%');

        if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
            gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

        readsizes = 0;
        readwgts = 0;
        readvals = 1;
        numbering = 1;
    }
    else if (format == DA_FMT_METIS) {
        fpin = gk_fopen(filename, "r", "da_csr_Read: fpin");
        do {
            if (gk_getline(&line, &lnlen, fpin) <= 0)
                gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
        } while (line[0] == '%');

        fmt = ncon = 0;
        nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
        if (nfields < 2)
            gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

        ncols = nrows;
        nnz *= 2;    /* METIS counts each undirected edge once */

        if (fmt > 111)
            gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

        /* fmt digits select: vertex sizes / vertex weights / edge values */
        sprintf(fmtstr, "%03zu", fmt%1000);
        readsizes = (fmtstr[0] == '1');
        readwgts = (fmtstr[1] == '1');
        readvals = (fmtstr[2] == '1');
        numbering = 1;
        ncon = (ncon == 0 ? 1 : ncon);
    }
    else {
        /* headerless CSR: derive nrows/nnz from the file statistics */
        readsizes = 0;
        readwgts = 0;

        gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

        if (readvals == 1 && nnz%2 == 1)
            gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the file %s is not even.\n", nnz, readvals, filename);
        if (readvals == 1)
            nnz = nnz/2;
        fpin = gk_fopen(filename, "r", "da_csr_Read: fpin");
    }

    mat = da_csr_Create();

    mat->nrows = nrows;

    rowptr = mat->rowptr = da_pmalloc(nrows+1, "da_csr_Read: rowptr");
    rowind = mat->rowind = da_imalloc(nnz, "da_csr_Read: rowind");
    if (readvals != 2)
        rowval = mat->rowval = da_vsmalloc(nnz, 1.0, "da_csr_Read: rowval");

    if (readsizes)
        mat->rsizes = da_vsmalloc(nrows, 0.0, "da_csr_Read: rsizes");

    if (readwgts)
        mat->rwgts = da_vsmalloc(nrows*ncon, 0.0, "da_csr_Read: rwgts");

    /*----------------------------------------------------------------------
     * Read the sparse matrix file
     *---------------------------------------------------------------------*/
    numbering = (numbering ? -1 : 0);
    for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
        do {
            if (gk_getline(&line, &lnlen, fpin) == -1)
                gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
        } while (line[0] == '%');

        head = line;
        tail = NULL;

        /* Read vertex sizes */
        if (readsizes) {
#ifdef __MSC__
            mat->rsizes[i] = (float)strtod(head, &tail);
#else
            mat->rsizes[i] = strtof(head, &tail);
#endif
            if (tail == head)
                gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
            if (mat->rsizes[i] < 0)
                errexit("The size for vertex %zd must be >= 0\n", i+1);
            head = tail;
        }

        /* Read vertex weights */
        if (readwgts) {
            for (l=0; l<ncon; l++) {
#ifdef __MSC__
                mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
                mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
                if (tail == head)
                    errexit("The line for vertex %zd does not have enough weights "
                            "for the %d constraints.\n", i+1, ncon);
                if (mat->rwgts[i*ncon+l] < 0)
                    errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
                head = tail;
            }
        }

        /* Read the rest of the row: (index [value])* until strtol stalls */
        while (1) {
            len = (int)strtol(head, &tail, 0);
            if (tail == head)
                break;
            head = tail;

            if ((rowind[k] = len + numbering) < 0)
                gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", len, i);

            ncols = gk_max(rowind[k], ncols);

            if (readvals == 1) {
#ifdef __MSC__
                fval = (float)strtod(head, &tail);
#else
                fval = strtof(head, &tail);
#endif
                if (tail == head)
                    gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
                head = tail;

                rowval[k] = fval;
            }
            k++;
        }
        rowptr[i+1] = k;
    }

    if (format == DA_FMT_METIS) {
        ASSERT(ncols+1 == mat->nrows);
        mat->ncols = mat->nrows;
    }
    else {
        mat->ncols = ncols+1;
    }

    if (k != nnz)
        gk_errexit(SIGERR, "da_csr_Read: Something wrong with the number of nonzeros in "
                "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

    gk_fclose(fpin);

    gk_free((void **)&line, LTERM);

    return mat;
}


/* Helper for da_csr_Write: frees the int32/float staging buffers (only when
   they were separately allocated, i.e. the native widths differ) and closes
   the output stream.  NOTE(review): the 'filename' parameter is unused. */
void inline da_csr_WriteClean(int32_t **bptr, int32_t **bind, float **bval,
        char * filename, FILE *fpout)
{
    if (sizeof(ptr_t) != sizeof(int32_t))
        gk_free((void**) bptr, LTERM);
    if (sizeof(idx_t) != sizeof(int32_t))
        gk_free((void**) bind, LTERM);
    if (sizeof(val_t) != sizeof(float))
        gk_free((void**) bval, LTERM);
    if (fpout)
        gk_fclose(fpout);
}

/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
    \param mat is the matrix to be written,
    \param filename is the name of the output file.
    \param format is one of: DA_FMT_CLUTO, DA_FMT_CSR, DA_FMT_BINROW,
           DA_FMT_BINCOL.
    \param writevals is either 1 or 0 indicating if the values will be
           written or not. This is only applicable when DA_FMT_CSR is used.
    \param numbering is either 1 or 0 indicating if the internal 0-based
           numbering will be shifted by one or not during output. This is
           only applicable when DA_FMT_CSR is used.
 */
/**************************************************************************/
void da_csr_Write(da_csr_t *mat, char *filename, char format, char writevals, char numbering)
{
    ssize_t i, j;
    size_t nnz;
    idx_t len;
    int32_t nr, nc, vId;
    int32_t edge[2], *bind = NULL, *bptr = NULL;
    float *bval = NULL;
    da_csr_t *tmp = NULL;
    FILE *fpout = NULL;

    if (!mat->rowval)
        writevals = 0;
    nr = mat->nrows;
    nc = mat->ncols;
    /* BINCOL serializes the column structure; all other formats the rows */
    nnz = (format == DA_FMT_BINCOL) ? mat->colptr[mat->ncols] : mat->rowptr[mat->nrows];

    /* stage ptr/ind/val into on-disk int32/float buffers; when the native
       widths already match, the b* pointers simply alias the matrix arrays */
    if (sizeof(ptr_t) != sizeof(int32_t)){
        if(format == DA_FMT_BINCOL){
            if(!mat->colptr)
                gk_errexit(SIGERR, "DA_FMT_BINCOL and no mat->colptr!\n");
            bptr = da_i32malloc(mat->ncols+1, "da_csr_Write: bptr");
            for(i=0; i <= mat->ncols; i++)
                bptr[i] = mat->colptr[i];
        } else {
            bptr = da_i32malloc(mat->nrows+1, "da_csr_Write: bptr");
            for(i=0; i <= mat->nrows; i++)
                bptr[i] = mat->rowptr[i];
        }
    } else
        bptr = (format == DA_FMT_BINCOL) ? (int32_t*)mat->colptr : (int32_t*)mat->rowptr;

    if (sizeof(idx_t) != sizeof(int32_t)){
        bind = da_i32malloc(nnz, "da_csr_Write: bind");
        if(format == DA_FMT_BINCOL){
            for(i=0; i < nnz; i++)
                bind[i] = mat->colind[i];
        } else {
            for(i=0; i < nnz; i++)
                bind[i] = mat->rowind[i];
        }
    } else
        bind = (format == DA_FMT_BINCOL) ? (int32_t*)mat->colind : (int32_t*)mat->rowind;

    if (writevals)
        if (sizeof(val_t) != sizeof(float)){
            bval = da_fmalloc(nnz, "da_csr_Write: bval");
            if(format == DA_FMT_BINCOL)
                for(i=0; i < nnz; i++)
                    bval[i] = mat->colval[i];
            else
                for(i=0; i < nnz; i++)
                    bval[i] = mat->rowval[i];
        } else
            bval = (format == DA_FMT_BINCOL) ? (float*)mat->colval : (float*)mat->rowval;

    /* ---- DA_FMT_BINAP: nrows, then per row (len, ind[len], val[len]);
       indices are temporarily shifted to 1-based for output ---- */
    if (format == DA_FMT_BINAP) {
        if (filename == NULL)
            gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
        assert(mat->nrows <= INT32_MAX);
        assert(mat->ncols <= INT32_MAX);
        assert(mat->rowptr[mat->nrows] <= SIZE_MAX);
        fpout = gk_fopen(filename, "wb", "da_csr_Write: fpout");
        // first add 1 to all features
        for (i = 0; i < nnz; i++)
            bind[i]++;
        if (!bval)
            gk_errexit(SIGERR, "da_csr_Write: Format requires values but rowval not present.\n");
        // write the number of rows
        fwrite(&nr, sizeof(int32_t), 1, fpout);
        for (i = 0; i < nr; i++) {
            len = bptr[i+1] - bptr[i];
            fwrite(&len, sizeof(int32_t), 1, fpout);
            if (len > 0) {
                fwrite(bind + bptr[i], sizeof(int32_t), len, fpout);
                fwrite(bval + bptr[i], sizeof(float), len, fpout);
            }
        }
        // return rowind to its initial state
        for (i = 0; i < nnz; i++)
            bind[i]--;
        da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
        return;
    }

    /* ---- DA_FMT_BINAPB: per row (rowid, len, ind[len]); no values ---- */
    if (format == DA_FMT_BINAPB) {
        if (filename == NULL)
            gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
        assert(mat->nrows <= INT32_MAX);
        assert(mat->ncols <= INT32_MAX);
        assert(mat->rowptr[mat->nrows] <= SIZE_MAX);
        fpout = gk_fopen(filename, "wb", "da_csr_Write: fpout");
        // first add 1 to all features
        for (i = 0; i < nnz; i++)
            bind[i]++;
        for (i = 0, vId = 1; i < nr; i++, vId++) {
            fwrite(&vId, sizeof(int32_t), 1, fpout);
            len = bptr[i+1] - bptr[i];
            fwrite(&len, sizeof(int32_t), 1, fpout);
            if (len > 0)
                fwrite(bind + bptr[i], sizeof(int32_t), len, fpout);
        }
        // return rowind to its initial state
        for (i = 0; i < nnz; i++)
            bind[i]--;
        da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
        return;
    }

    /* ---- DA_FMT_BINROW: header + rowptr + rowind [+ rowval] ---- */
    if (format == DA_FMT_BINROW) {
        if (filename == NULL)
            gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
        assert(mat->nrows <= INT32_MAX);
        assert(mat->ncols <= INT32_MAX);
        assert(mat->rowptr[mat->nrows] <= SIZE_MAX);
        fpout = gk_fopen(filename, "wb", "da_csr_Write: fpout");
        fwrite(&nr, sizeof(int32_t), 1, fpout);
        fwrite(&nc, sizeof(int32_t), 1, fpout);
        fwrite(bptr, sizeof(int32_t), nr+1, fpout);
        fwrite(bind, sizeof(int32_t), nnz, fpout);
        if (writevals)
            fwrite(bval, sizeof(float), nnz, fpout);
        da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
        return;
    }

    /* ---- DA_FMT_BINCOL: header + colptr + colind [+ colval] ---- */
    if (format == DA_FMT_BINCOL) {
        if (filename == NULL)
            gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
        assert(mat->nrows <= INT32_MAX);
        assert(mat->ncols <= INT32_MAX);
        assert(mat->rowptr[mat->nrows] <= SIZE_MAX);
        fpout = gk_fopen(filename, "wb", "da_csr_Write: fpout");
        fwrite(&nr, sizeof(int32_t), 1, fpout);
        fwrite(&nc, sizeof(int32_t), 1, fpout);
        fwrite(bptr, sizeof(int32_t), nc + 1, fpout);
        fwrite(bind, sizeof(int32_t), nnz, fpout);
        if (writevals)
            /* BUG(review): bval is a float buffer; element size should be
               sizeof(float) as in BINROW, not sizeof(val_t) */
            fwrite(bval, sizeof(val_t), nnz, fpout);
        da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
        return;
    }

    /* ---- DA_FMT_BIJV: header + (i, j[, v]) triplets ---- */
    if (format == DA_FMT_BIJV) {
        if (filename == NULL)
            gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
        assert(mat->nrows <= INT32_MAX);
        assert(mat->ncols <= INT32_MAX);
        assert(mat->rowptr[mat->nrows] <= SIZE_MAX);
        fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
        fwrite(&nr, sizeof(int32_t), 1, fpout);
        fwrite(&nc, sizeof(int32_t), 1, fpout);
        fwrite(&nnz, sizeof(size_t), 1, fpout);
        /* BUG(review): 'writevals' is a char; writing sizeof(int32_t)
           bytes from its address reads 3 bytes past the variable and
           stores garbage in the header's readvals field */
        fwrite(&writevals, sizeof(int32_t), 1, fpout);
        for (i = 0; i < nr; i++) {
            edge[0] = i;
            for (j = bptr[i]; j < bptr[i+1]; j++) {
                edge[1] = bind[j];
                fwrite(edge, sizeof(int32_t), 2, fpout);
                if (writevals)
                    fwrite(bval+j, sizeof(float), 1, fpout);
            }
        }
        da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
        return;
    }

    if (format == DA_FMT_METIS)
        gk_errexit(SIGERR, "METIS output format is not currently supported.\n");

    /* text formats from here on; NULL filename means stdout */
    if (filename)
        fpout = gk_fopen(filename, "w", "da_csr_Write: fpout");
    else
        fpout = stdout;

    if (format == DA_FMT_IJV) {
        numbering = (numbering ? 1 : 0);
        for (i = 0; i < mat->nrows; i++) {
            for (j = bptr[i]; j < bptr[i+1]; j++) {
                if (writevals)
                    fprintf(fpout, "%zd %d %.8f\n", i + numbering, bind[j] + numbering, bval[j]);
                else
                    fprintf(fpout, "%zd %d\n", i + numbering, bind[j] + numbering);
            }
        }
        if (fpout)
            gk_fclose(fpout);
        return;
    }

    if (format == DA_FMT_SMAT) {
        // writevals and numbering are ignored for this format
        if (!mat->rowptr)
            gk_errexit(3, "da_csr_Write: Row data needed for MV_FMT_SMAT format SMAT file.");
        // Matlab requires columns to be ordered - copy matrix temporarily to order columns
        tmp = da_csr_Copy(mat);
        da_csr_LoadBases(tmp); // also sorts index
        idx_t row, col;
        for (col = row = i = 0; i < tmp->ncols; i++)
            for (j = tmp->colptr[i]; j < tmp->colptr[i + 1]; j++) {
                col = i + 1;
                row = tmp->colind[j] + 1;
                fprintf(fpout, PRNT_IDXTYPE "\t" PRNT_IDXTYPE "\t%.17g\n", row, col, tmp->colval[j]);
            }
        /* emit an explicit zero at (nrows, ncols) so Matlab infers the
           full matrix dimensions even when the last entry is elsewhere */
        if (row != mat->nrows || col != mat->ncols)
            fprintf(fpout, PRNT_IDXTYPE "\t" PRNT_IDXTYPE "\t%.17g\n", mat->nrows, mat->ncols, 0.0);
        da_csr_Free(&tmp);
        if (fpout)
            gk_fclose(fpout);
        return;
    }

    assert(mat->nrows <= INT32_MAX);
    assert(mat->ncols <= INT32_MAX);
    assert(mat->rowptr[mat->nrows] <= SIZE_MAX);

    /* DA_FMT_CLUTO adds a header line; the row bodies below are shared
       with the headerless CSR format */
    if (format == DA_FMT_CLUTO) {
        fprintf(fpout, "%d %d %zu\n", nr, nc, nnz);
        if (mat->rowval)
            writevals = 1;
        numbering = 1;
    }
    for (i = 0; i < nr; i++) {
        for (j = bptr[i]; j < bptr[i+1]; j++) {
            fprintf(fpout, " %d", bind[j] + (numbering ? 1 : 0));
            if (writevals)
                fprintf(fpout, " %f", bval[j]);
        }
        fprintf(fpout, "\n");
    }
    da_csr_WriteClean(&bptr, &bind, &bval, filename, fpout);
}


/**************************************************************************/
/*! Prints the row based representation of the matrix to screen.
    \param mat is the matrix to be printed.
 */
/**************************************************************************/
void da_csr_Print(da_csr_t *mat)
{
    /* CLUTO format, with values and 1-based numbering, to stdout */
    da_csr_Write(mat, NULL, DA_FMT_CLUTO, 1, 1);
}


/*************************************************************************/
/*! Check if a text (non-binary) text file containing a csr is in CLUTO or
    CSR format.
    \param file is the matrix file to be checked.
    \return the CSR format: MV_FMT_CLUTO or MV_FMT_CSR
 */
/*************************************************************************/
char da_csr_isClutoOrCsr(char *file)
{
    size_t nrows, nnz;

    da_getfilestats(file, &nrows, &nnz, NULL, NULL);
    /* heuristic: a 3-integer CLUTO header makes the total number count odd
       when each nnz contributes an (index, value) pair */
    return (nnz%2 == 1) ? DA_FMT_CLUTO : DA_FMT_CSR;
}


/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place
    by analyzing the row structure of the matrix. The pruning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (DA_ROW) or the columns (DA_COL)
           of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) that a column (row)
           must be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row)
           must be present at in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
da_csr_t *da_csr_Prune(da_csr_t *mat, char what, idx_t minf, idx_t maxf)
{
    ssize_t i, j, nkeep;
    idx_t nrows, ncols;
    ptr_t *rowptr, *prowptr;
    idx_t *rowind, *prowind, *cnts;
    val_t *rowval, *prowval;
    da_csr_t *pmat;

    pmat = da_csr_Create();
    nrows = pmat->nrows = mat->nrows;
    ncols = pmat->ncols = mat->ncols;

    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;

    /* the result is over-allocated to the input nnz; unused slots are
       simply never referenced since prowptr bounds the valid region */
    prowptr = pmat->rowptr = da_pmalloc(nrows+1, "da_csr_Prune: nrowptr");
    prowind = pmat->rowind = da_imalloc(rowptr[nrows], "da_csr_Prune: nrowind");
    prowval = pmat->rowval = da_vmalloc(rowptr[nrows], "da_csr_Prune: nrowval");

    switch (what) {
        case DA_COL:
            /* count, for every column, how many rows it appears in */
            cnts = da_ismalloc(ncols, 0, "da_csr_Prune: collen");
            for (i = 0; i < nrows; i++) {
                for (j = rowptr[i]; j < rowptr[i+1]; j++) {
                    ASSERT(rowind[j] < ncols);
                    cnts[rowind[j]]++;
                }
            }
            /* convert the frequencies into keep(1)/drop(0) flags */
            for (i = 0; i < ncols; i++)
                cnts[i] = (cnts[i] >= minf && cnts[i] <= maxf) ? 1 : 0;

            /* copy the entries of the surviving columns */
            prowptr[0] = 0;
            for (nkeep = 0, i = 0; i < nrows; i++) {
                for (j = rowptr[i]; j < rowptr[i+1]; j++) {
                    if (cnts[rowind[j]]) {
                        prowind[nkeep] = rowind[j];
                        prowval[nkeep] = rowval[j];
                        nkeep++;
                    }
                }
                prowptr[i+1] = nkeep;
            }
            gk_free((void **)&cnts, LTERM);
            break;

        case DA_ROW:
            /* keep only the rows whose length lies within [minf, maxf];
               dropped rows become empty but keep their row number */
            prowptr[0] = 0;
            for (nkeep = 0, i = 0; i < nrows; i++) {
                if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
                    for (j = rowptr[i]; j < rowptr[i+1]; j++, nkeep++) {
                        prowind[nkeep] = rowind[j];
                        prowval[nkeep] = rowval[j];
                    }
                }
                prowptr[i+1] = nkeep;
            }
            break;

        default:
            da_csr_Free(&pmat);
            gk_errexit(SIGERR, "Unknown pruning type of %d\n", what);
            return NULL;
    }

    return pmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight entries whose
    sum accounts for a certain fraction of the overall weight of the
    row/column.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (DA_ROW) or the columns (DA_COL)
           of the matrix will be pruned,
    \param norm indicates the norm that will be used to aggregate the
           weights and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be
           retained by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
da_csr_t *da_csr_LowFilter(da_csr_t *mat, char what, char norm, float fraction)
{
    ssize_t i, j, nnz;
    idx_t nrows, ncols, ncand, maxlen=0;
    ptr_t *rowptr, *colptr, *nrowptr;
    idx_t *rowind, *colind, *nrowind;
    val_t *rowval, *colval, *nrowval, rsum, tsum;
    da_csr_t *nmat;
    da_ivkv_t *cand;

    nmat = da_csr_Create();

    nrows = nmat->nrows = mat->nrows;
    ncols = nmat->ncols = mat->ncols;

    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;
    colptr = mat->colptr;
    colind = mat->colind;
    colval = mat->colval;

    /* output is over-allocated to the input's nnz and compacted at the end */
    nrowptr = nmat->rowptr = da_pmalloc(nrows+1, "da_csr_LowFilter: nrowptr");
    nrowind = nmat->rowind = da_imalloc(rowptr[nrows], "da_csr_LowFilter: nrowind");
    nrowval = nmat->rowval = da_vmalloc(rowptr[nrows], "da_csr_LowFilter: nrowval");

    switch (what) {
        case DA_COL:
            if (mat->colptr == NULL)
                gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

            /* nrowptr doubles as a per-row insertion cursor, seeded with the
               original row starts */
            da_pcopy(nrows+1, rowptr, nrowptr);

            for (i=0; i<ncols; i++)
                maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

            /* NOTE(review): columns are processed in parallel, but two columns
               can select entries of the same row concurrently, so the
               nrowptr[cand[j].key]++ cursor update below looks like a data
               race — confirm whether this path is ever run multithreaded. */
            #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
            {
                cand = da_ivkvmalloc(maxlen, "da_csr_LowFilter: cand");

                #pragma omp for schedule(static)
                for (i=0; i<ncols; i++) {
                    /* gather the column and its total (1- or squared 2-) norm */
                    for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
                        cand[ncand].key = colind[j];
                        cand[ncand].val = colval[j];
                        tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
                    }
                    /* sort by decreasing value, then keep the heaviest entries
                       until they account for fraction*tsum */
                    da_ivkvsortd(ncand, cand);
                    for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
                        rsum += (norm == 1 ? cand[j].val : cand[j].val*cand[j].val);
                        nrowind[nrowptr[cand[j].key]] = i;
                        nrowval[nrowptr[cand[j].key]] = cand[j].val;
                        nrowptr[cand[j].key]++;
                    }
                }
                gk_free((void **)&cand, LTERM);
            }

            /* compact the nrowind/nrowval */
            for (nnz=0, i=0; i<nrows; i++) {
                for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
                    nrowind[nnz] = nrowind[j];
                    nrowval[nnz] = nrowval[j];
                }
                nrowptr[i] = nnz;
            }
            /* nrowptr currently holds row *ends*; shift it back into a
               standard CSR pointer array */
            SHIFTCSR(i, nrows, nrowptr);

            break;

        case DA_ROW:
            if (mat->rowptr == NULL)
                gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

            for (i=0; i<nrows; i++)
                maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

            #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
            {
                cand = da_ivkvmalloc(maxlen, "da_csr_LowFilter: cand");

                #pragma omp for schedule(static)
                for (i=0; i<nrows; i++) {
                    /* gather the row and its total (1- or squared 2-) norm */
                    for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
                        cand[ncand].key = rowind[j];
                        cand[ncand].val = rowval[j];
                        tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
                    }
                    da_ivkvsortd(ncand, cand);
                    /* rewrite the kept entries at the row's original start;
                       nrowptr[i+1] records the (pre-compaction) end */
                    for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
                        rsum += (norm == 1 ? cand[j].val : cand[j].val*cand[j].val);
                        nrowind[rowptr[i]+j] = cand[j].key;
                        nrowval[rowptr[i]+j] = cand[j].val;
                    }
                    nrowptr[i+1] = rowptr[i]+j;
                }
                gk_free((void **)&cand, LTERM);
            }

            /* compact nrowind/nrowval */
            nrowptr[0] = nnz = 0;
            for (i=0; i<nrows; i++) {
                for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
                    nrowind[nnz] = nrowind[j];
                    nrowval[nnz] = nrowval[j];
                }
                nrowptr[i+1] = nnz;
            }
            break;

        default:
            da_csr_Free(&nmat);
            gk_errexit(SIGERR, "Unknown pruning type of %d\n", what);
            return NULL;
    }

    return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight top-K entries
    along each row/column and those entries whose weight is greater than
    a specified value.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (DA_ROW) or the columns (DA_COL)
           of the matrix will be pruned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight of a term above which will be kept.
           This is used to select additional terms past the first topk.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
da_csr_t *da_csr_topKPlusFilter(da_csr_t *mat, char what, idx_t topk, val_t keepval)
{
    ssize_t i, j, k, nnz;
    idx_t nrows, ncols, ncand;
    ptr_t *rowptr, *colptr, *nrowptr;
    idx_t *rowind, *colind, *nrowind;
    val_t *rowval, *colval, *nrowval;
    da_csr_t *nmat;
    da_ivkv_t *cand;

    nmat = da_csr_Create();

    nrows = nmat->nrows = mat->nrows;
    ncols = nmat->ncols = mat->ncols;

    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;
    colptr = mat->colptr;
    colind = mat->colind;
    colval = mat->colval;

    /* output is over-allocated to the input's nnz and compacted at the end
       (allocation tags still say "LowFilter"; kept as-is for byte fidelity) */
    nrowptr = nmat->rowptr = da_pmalloc(nrows+1, "da_csr_LowFilter: nrowptr");
    nrowind = nmat->rowind = da_imalloc(rowptr[nrows], "da_csr_LowFilter: nrowind");
    nrowval = nmat->rowval = da_vmalloc(rowptr[nrows], "da_csr_LowFilter: nrowval");

    switch (what) {
        case DA_COL:
            if (mat->colptr == NULL)
                gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

            cand = da_ivkvmalloc(nrows, "da_csr_LowFilter: cand");

            /* nrowptr doubles as a per-row insertion cursor, seeded with the
               original row starts */
            da_pcopy(nrows+1, rowptr, nrowptr);

            for (i=0; i<ncols; i++) {
                /* gather the column */
                for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
                    cand[ncand].key = colind[j];
                    cand[ncand].val = colval[j];
                }
                da_ivkvsortd(ncand, cand);

                /* keep the top-k entries unconditionally ... */
                k = gk_min(topk, ncand);
                for (j=0; j<k; j++) {
                    nrowind[nrowptr[cand[j].key]] = i;
                    nrowval[nrowptr[cand[j].key]] = cand[j].val;
                    nrowptr[cand[j].key]++;
                }
                /* ... plus any further entries whose weight is >= keepval
                   (the list is sorted, so the first miss terminates) */
                for (; j<ncand; j++) {
                    if (cand[j].val < keepval)
                        break;
                    nrowind[nrowptr[cand[j].key]] = i;
                    nrowval[nrowptr[cand[j].key]] = cand[j].val;
                    nrowptr[cand[j].key]++;
                }
            }

            /* compact the nrowind/nrowval */
            for (nnz=0, i=0; i<nrows; i++) {
                for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
                    nrowind[nnz] = nrowind[j];
                    nrowval[nnz] = nrowval[j];
                }
                nrowptr[i] = nnz;
            }
            /* nrowptr holds row ends at this point; shift back to CSR form */
            SHIFTCSR(i, nrows, nrowptr);

            gk_free((void **)&cand, LTERM);
            break;

        case DA_ROW:
            if (mat->rowptr == NULL)
                gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

            cand = da_ivkvmalloc(ncols, "da_csr_LowFilter: cand");

            nrowptr[0] = 0;
            for (nnz=0, i=0; i<nrows; i++) {
                /* gather the row */
                for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
                    cand[ncand].key = rowind[j];
                    cand[ncand].val = rowval[j];
                }
                da_ivkvsortd(ncand, cand);

                /* top-k entries, then the >= keepval tail */
                k = gk_min(topk, ncand);
                for (j=0; j<k; j++, nnz++) {
                    nrowind[nnz] = cand[j].key;
                    nrowval[nnz] = cand[j].val;
                }
                for (; j<ncand; j++, nnz++) {
                    if (cand[j].val < keepval)
                        break;
                    nrowind[nnz] = cand[j].key;
                    nrowval[nnz] = cand[j].val;
                }
                nrowptr[i+1] = nnz;
            }
            gk_free((void **)&cand, LTERM);
            break;

        default:
            da_csr_Free(&nmat);
            gk_errexit(SIGERR, "Unknown pruning type of %d\n", what);
            return NULL;
    }

    return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-supplied
    multiple over the average. This routine assumes that the vectors are
    normalized to be unit length.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (DA_ROW) or the columns (DA_COL)
           of the matrix will be pruned,
    \param zscore is the multiplicative factor over the average contribution
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
da_csr_t *da_csr_ZScoreFilter(da_csr_t *mat, char what, float zscore)
{
    ssize_t i, j, nkept;
    idx_t nrows;
    ptr_t *rowptr, *frowptr;
    idx_t *rowind, *frowind;
    val_t *rowval, *frowval, cutoff;
    da_csr_t *fmat;

    fmat = da_csr_Create();
    fmat->nrows = mat->nrows;
    fmat->ncols = mat->ncols;

    nrows  = mat->nrows;
    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;

    /* over-allocate the result to the input nnz; frowptr bounds what is used */
    frowptr = fmat->rowptr = da_pmalloc(nrows+1, "da_csr_ZScoreFilter: nrowptr");
    frowind = fmat->rowind = da_imalloc(rowptr[nrows], "da_csr_ZScoreFilter: nrowind");
    frowval = fmat->rowval = da_vmalloc(rowptr[nrows], "da_csr_ZScoreFilter: nrowval");

    switch (what) {
        case DA_COL:
            gk_errexit(SIGERR, "This has not been implemented yet.\n");
            break;

        case DA_ROW:
            if (mat->rowptr == NULL)
                gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

            frowptr[0] = 0;
            for (nkept = 0, i = 0; i < nrows; i++) {
                /* keep only entries exceeding zscore times the mean
                   per-entry weight of this row */
                cutoff = zscore/(rowptr[i+1]-rowptr[i]);
                for (j = rowptr[i]; j < rowptr[i+1]; j++) {
                    if (rowval[j] > cutoff) {
                        frowind[nkept] = rowind[j];
                        frowval[nkept] = rowval[j];
                        nkept++;
                    }
                }
                frowptr[i+1] = nkept;
            }
            break;

        default:
            da_csr_Free(&fmat);
            gk_errexit(SIGERR, "Unknown pruning type of %d\n", what);
            return NULL;
    }

    return fmat;
}


/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
    As a result of the compaction, the column numbers are renumbered.
    The compaction operation is done in place and only affects the
    row-based representation of the matrix. The new columns are ordered
    in decreasing frequency.

    \param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void da_csr_CompactColumns(da_csr_t *mat)
{
    ssize_t i;
    idx_t nrows, ncols, nncols;
    ptr_t *rowptr;
    idx_t *rowind, *colmap;
    da_iikv_t *clens;

    nrows  = mat->nrows;
    ncols  = mat->ncols;
    rowptr = mat->rowptr;
    rowind = mat->rowind;

    colmap = da_imalloc(ncols, "da_csr_CompactColumns: colmap");

    /* count how many nonzeros each column holds */
    clens = da_iikvmalloc(ncols, "da_csr_CompactColumns: clens");
    for (i=0; i<ncols; i++) {
        clens[i].key = i;
        clens[i].val = 0;
    }
    for (i=0; i<rowptr[nrows]; i++)
        clens[rowind[i]].val++;

    /* sort by decreasing frequency; the non-empty columns (a prefix after
       the sort) receive the new ids 0..nncols-1 */
    da_iikvsortd(ncols, clens);

    for (nncols=0, i=0; i<ncols; i++) {
        if (clens[i].val > 0)
            colmap[clens[i].key] = nncols++;
        else
            break;
    }

    /* renumber the column indices in place */
    for (i=0; i<rowptr[nrows]; i++)
        rowind[i] = colmap[rowind[i]];

    mat->ncols = nncols;

    gk_free((void **)&colmap, &clens, LTERM);
}


/*************************************************************************/
/*! Compacts the row-space of the matrix by removing empty rows.
    As a result of the compaction, the row numbers are renumbered.
    The compaction operation is done in place and only affects the
    row-based representation of the matrix.

    \param mat the matrix whose empty rows will be removed.
*/
/**************************************************************************/
void da_csr_CompactRows(da_csr_t *mat)
{
    ssize_t i, j;
    idx_t nrows;
    ptr_t *rowptr;

    nrows  = mat->nrows;
    rowptr = mat->rowptr;

    /* slide the starting offsets of the non-empty rows to the front;
       j counts the rows that are kept */
    for (j=0, i=0; i < nrows; i++) {
        rowptr[j] = rowptr[i];
        if (rowptr[i+1] > rowptr[i])
            j++;
    }
    /* BUGFIX: the terminating offset belongs at rowptr[j], not rowptr[j+1].
       The old code left the last kept row with a stale end offset whenever
       an interior row was empty, and wrote one slot past the nrows+1
       allocation when no row was empty (the realloc below to j+1 elements
       cannot even hold index j+1). */
    rowptr[j] = rowptr[nrows];
    mat->nrows = j;
    mat->rowptr = da_prealloc(mat->rowptr, j+1, "da_csr_CompactRows: mat->rowptr realloc");
}


/*************************************************************************/
/*! Sorts the indices in increasing order (whether matrix has values or not)
    \param mat the matrix itself,
    \param what is either DA_ROW or DA_COL indicating which set of
           indices to sort.
*/
/**************************************************************************/
void da_csr_SortIndices(da_csr_t *mat, char what)
{
    ptr_t n, nn=0;
    ptr_t *ptr;
    idx_t *ind;
    val_t *val;

    /* select the row- or column-based view to be sorted */
    switch (what) {
        case DA_ROW:
            if (!mat->rowptr)
                gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
            n   = mat->nrows;
            ptr = mat->rowptr;
            ind = mat->rowind;
            val = mat->rowval;
            break;

        case DA_COL:
            if (!mat->colptr)
                gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
            n   = mat->ncols;
            ptr = mat->colptr;
            ind = mat->colind;
            val = mat->colval;
            break;

        default:
            gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
            return;
    }

    /* parallelize only when there are enough vectors to be worth it */
    #pragma omp parallel if (n > 100)
    {
        ssize_t i, j, k;
        da_pikv_t *cand;
        val_t *tval = NULL;

        /* nn = length of the longest row/column; computed once (the single
           construct's implicit barrier makes it visible to all threads
           before the per-thread buffers below are sized with it) */
        #pragma omp single
        for (i=0; i<n; i++)
            nn = gk_max(nn, ptr[i+1]-ptr[i]);

        /* per-thread scratch: (position, index) pairs, plus a value copy
           when the view carries values */
        cand = da_pikvmalloc(nn, "da_csr_SortIndices: cand");
        if(val){
            tval = da_vmalloc(nn, "da_csr_SortIndices: tval");

            #pragma omp for schedule(static)
            for (i=0; i<n; i++) {
                /* k flags whether this vector contains any inversion; the
                   sort is skipped entirely for already-sorted vectors */
                for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
                    if (j > ptr[i] && ind[j] < ind[j-1])
                        k = 1; /* an inversion */
                    cand[j-ptr[i]].key = j-ptr[i];
                    cand[j-ptr[i]].val = ind[j];
                    tval[j-ptr[i]] = val[j];
                }
                if (k) {
                    da_pikvsorti(ptr[i+1]-ptr[i], cand);
                    /* cand[].key remembers the original position, so the
                       values can be permuted to match the sorted indices */
                    for (j=ptr[i]; j<ptr[i+1]; j++) {
                        ind[j] = cand[j-ptr[i]].val;
                        val[j] = tval[cand[j-ptr[i]].key];
                    }
                }
            }
        }
        else {
            /* same as above, without the value permutation */
            #pragma omp for schedule(static)
            for (i=0; i<n; i++) {
                for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
                    if (j > ptr[i] && ind[j] < ind[j-1])
                        k = 1; /* an inversion */
                    cand[j-ptr[i]].key = j-ptr[i];
                    cand[j-ptr[i]].val = ind[j];
                }
                if (k) {
                    da_pikvsorti(ptr[i+1]-ptr[i], cand);
                    for (j=ptr[i]; j<ptr[i+1]; j++)
                        ind[j] = cand[j-ptr[i]].val;
                }
            }
        }
        gk_free((void **)&cand, &tval, LTERM);
    }

}


/*************************************************************************/
/*! Checks that an index is sorted
    \param mat the matrix itself,
    \param what is either DA_ROW or DA_COL indicating which set of
           indices to check.
*/
/**************************************************************************/
char da_csr_CheckSortedIndex(da_csr_t *mat, char what)
{
    ptr_t n, nn=0;  /* nn is unused here; kept for byte fidelity */
    ptr_t *ptr;
    idx_t *ind;
    ssize_t i, j;
    char k=1;

    /* select the row- or column-based view to be checked */
    switch (what) {
        case DA_ROW:
            if (!mat->rowptr)
                gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
            n   = mat->nrows;
            ptr = mat->rowptr;
            ind = mat->rowind;
            break;

        case DA_COL:
            if (!mat->colptr)
                gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
            n   = mat->ncols;
            ptr = mat->colptr;
            ind = mat->colind;
            break;

        default:
            gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
            return k;
    }

    /* scan until the first inversion; the outer condition k == 1 stops the
       row scan as soon as one unsorted vector is found */
    for (k=1, i=0; i < n && k == 1; i++) {
        for (j=ptr[i]; j < ptr[i+1]; j++) {
            if (j > ptr[i] && ind[j] < ind[j-1]){
                k = 0; /* an inversion */
                break;
            }
        }
    }

    return k;
}


/*************************************************************************/
/*! Creates a row/column index from the column/row data.
    \param mat the matrix itself,
    \param what is either DA_ROW or DA_COL indicating which index
           will be created.
*/
/**************************************************************************/
void da_csr_CreateIndex(da_csr_t *mat, char what)
{
    /* 'f' stands for forward, 'r' stands for reverse */
    ssize_t i, j, k, nf, nr;
    ptr_t *fptr, *rptr;
    idx_t *find, *rind;
    val_t *fval, *rval;

    switch (what) {
        case DA_COL:
            /* build the column view from the row view */
            nf   = mat->nrows;
            fptr = mat->rowptr;
            find = mat->rowind;
            fval = mat->rowval;

            /* drop any stale column structure */
            if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
            if (mat->colind) gk_free((void **)&mat->colind, LTERM);
            if (mat->colval) gk_free((void **)&mat->colval, LTERM);

            nr   = mat->ncols;
            rptr = mat->colptr = da_psmalloc(nr+1, 0, "da_csr_CreateIndex: rptr");
            rind = mat->colind = da_imalloc(fptr[nf], "da_csr_CreateIndex: rind");
            rval = mat->colval = (fval ? da_vmalloc(fptr[nf], "da_csr_CreateIndex: rval") : NULL);
            break;

        case DA_ROW:
            /* build the row view from the column view */
            nf   = mat->ncols;
            fptr = mat->colptr;
            find = mat->colind;
            fval = mat->colval;

            /* drop any stale row structure */
            if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
            if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
            if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

            nr   = mat->nrows;
            rptr = mat->rowptr = da_psmalloc(nr+1, 0, "da_csr_CreateIndex: rptr");
            rind = mat->rowind = da_imalloc(fptr[nf], "da_csr_CreateIndex: rind");
            rval = mat->rowval = (fval ? da_vmalloc(fptr[nf], "da_csr_CreateIndex: rval") : NULL);
            break;

        default:
            gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
            return;
    }

    /* counting pass: rptr[v] = number of entries landing in reverse vector v */
    for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
            rptr[find[j]]++;
    }
    /* turn the counts into starting offsets (prefix sum) */
    MAKECSR(i, nr, rptr);

    /* rptr is used as an insertion cursor below; SHIFTCSR restores the
       starting offsets afterwards. The 6*nr threshold picks between two
       scatter strategies — presumably a cache-behavior heuristic for
       dense-vs-sparse reverse vectors (TODO confirm intent). */
    if (rptr[nr] > 6*nr) {
        /* two separate passes over the data: indices, then values */
        for (i=0; i<nf; i++) {
            for (j=fptr[i]; j<fptr[i+1]; j++)
                rind[rptr[find[j]]++] = i;
        }
        SHIFTCSR(i, nr, rptr);

        if (fval) {
            for (i=0; i<nf; i++) {
                for (j=fptr[i]; j<fptr[i+1]; j++)
                    rval[rptr[find[j]]++] = fval[j];
            }
            SHIFTCSR(i, nr, rptr);
        }
    }
    else {
        /* single pass that scatters indices and values together */
        if (fval) {
            for (i=0; i<nf; i++) {
                for (j=fptr[i]; j<fptr[i+1]; j++) {
                    k = find[j];
                    rind[rptr[k]] = i;
                    rval[rptr[k]++] = fval[j];
                }
            }
        }
        else {
            for (i=0; i<nf; i++) {
                for (j=fptr[i]; j<fptr[i+1]; j++)
                    rind[rptr[find[j]]++] = i;
            }
        }
        SHIFTCSR(i, nr, rptr);
    }

}


/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying DA_ROW, DA_COL, DA_ROW|DA_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void da_csr_Normalize(da_csr_t *mat, char what, char norm) { ssize_t i, j; idx_t n; ptr_t *ptr; val_t *val, sum; if ((what & DA_ROW) && mat->rowval) { n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; #pragma omp parallel if (ptr[n] > DA_OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){ if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } if ((what & DA_COL) && mat->colval) { n = mat->ncols; ptr = mat->colptr; val = mat->colval; #pragma omp parallel if (ptr[n] > DA_OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } } /*************************************************************************/ /*! Applies different row scaling methods. \param mat the matrix itself, \param type indicates the type of row scaling. Possible values are: DA_SCALE_MAXTF, DA_SCALE_SQRT, DA_SCALE_LOG, DA_SCALE_IDF, DA_SCALE_MAXTF2. 
*/
/**************************************************************************/
void da_csr_Scale(da_csr_t *mat, char type)
{
    ssize_t i, j;
    idx_t nrows, ncols, nnzcols, bgfreq;
    ptr_t *rowptr;
    idx_t *rowind, *collen;
    val_t *rowval, *cscale, maxtf;

    nrows  = mat->nrows;
    rowptr = mat->rowptr;
    rowind = mat->rowind;
    rowval = mat->rowval;

    switch (type) {
        case DA_SCALE_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j, maxtf) schedule(static)
                for (i=0; i<nrows; i++) {
                    /* find the largest |value| in the row, then rescale */
                    maxtf = fabs(rowval[rowptr[i]]);
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        rowval[j] = .5 + .5*rowval[j]/maxtf;
                }
            }
            break;

        case DA_SCALE_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j, maxtf) schedule(static)
                for (i=0; i<nrows; i++) {
                    maxtf = fabs(rowval[rowptr[i]]);
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        rowval[j] = .1 + .9*rowval[j]/maxtf;
                }
            }
            break;

        case DA_SCALE_SQRT: /* TF' = .1+SQRT(TF) */
            /* sign() preserves the sign of the original value */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
                        if (rowval[j] != 0.0)
                            rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
                    }
                }
            }
            break;

        case DA_SCALE_POW25: /* TF' = .1+POW(TF,.25) */
            /* x^.25 computed as sqrt(sqrt(x)) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
                        if (rowval[j] != 0.0)
                            rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
                    }
                }
            }
            break;

        case DA_SCALE_POW65: /* TF' = .1+POW(TF,.65) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
                        if (rowval[j] != 0.0)
                            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
                    }
                }
            }
            break;

        case DA_SCALE_POW75: /* TF' = .1+POW(TF,.75) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
                        if (rowval[j] != 0.0)
                            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
                    }
                }
            }
            break;

        case DA_SCALE_POW85: /* TF' = .1+POW(TF,.85) */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
                        if (rowval[j] != 0.0)
                            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
                    }
                }
            }
            break;

        case DA_SCALE_LOG: /* TF' = 1+log_2(TF) */
            /* flat loop over all nonzeros; logscale converts natural log
               to base-2 */
            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                double logscale = 1.0/log(2.0);
                #pragma omp for schedule(static,32)
                for (i=0; i<rowptr[nrows]; i++) {
                    if (rowval[i] != 0.0)
                        rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
                }
            }
            break;

        case DA_SCALE_IDF: /* TF' = TF*IDF */
            ncols  = mat->ncols;
            cscale = da_vmalloc(ncols, "da_csr_Scale: cscale");
            collen = da_ismalloc(ncols, 0, "da_csr_Scale: collen");

            /* document frequency of each column */
            for (i=0; i<nrows; i++) {
                for (j=rowptr[i]; j<rowptr[i+1]; j++)
                    collen[rowind[j]]++;
            }

            #pragma omp parallel if (ncols > DA_OMPMINOPS)
            {
                #pragma omp for schedule(static)
                for (i=0; i<ncols; i++)
                    cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
            }

            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        rowval[j] *= cscale[rowind[j]];
                }
            }

            gk_free((void **)&cscale, &collen, LTERM);
            break;

        case DA_SCALE_IDF2: /* TF' = TF*IDF */
            ncols  = mat->ncols;
            cscale = da_vmalloc(ncols, "da_csr_Scale: cscale");
            collen = da_ismalloc(ncols, 0, "da_csr_Scale: collen");

            /* document frequency of each column */
            for (i=0; i<nrows; i++) {
                for (j=rowptr[i]; j<rowptr[i+1]; j++)
                    collen[rowind[j]]++;
            }

            nnzcols = 0;
            #pragma omp parallel if (ncols > DA_OMPMINOPS)
            {
                #pragma omp for schedule(static) reduction(+:nnzcols)
                for (i=0; i<ncols; i++)
                    nnzcols += (collen[i] > 0 ? 1 : 0);

                /* NOTE(review): bgfreq is shared and written here by every
                   thread (same value, after the reduction barrier), and the
                   printf below is executed once per thread, duplicating the
                   diagnostic output under OpenMP — confirm whether this is
                   intended. */
                bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
                printf("nnz: " PRNT_PTRTYPE ", nnzcols: " PRNT_IDXTYPE ", bgfreq: " PRNT_IDXTYPE "\n", rowptr[nrows], nnzcols, bgfreq);

                /* smoothed IDF using bgfreq as a background frequency */
                #pragma omp for schedule(static)
                for (i=0; i<ncols; i++)
                    cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
            }

            #pragma omp parallel if (rowptr[nrows] > DA_OMPMINOPS)
            {
                #pragma omp for private(j) schedule(static)
                for (i=0; i<nrows; i++) {
                    for (j=rowptr[i]; j<rowptr[i+1]; j++)
                        rowval[j] *= cscale[rowind[j]];
                }
            }

            gk_free((void **)&cscale, &collen, LTERM);
            break;

        default:
            gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
            break;
    }

}


/*************************************************************************/
/*!
Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either DA_ROW or DA_COL indicating which sums to compute.
*/
/**************************************************************************/
void da_csr_ComputeSums(da_csr_t *mat, char what)
{
    ssize_t i;
    idx_t nvecs;
    ptr_t *ptr;
    val_t *val, *vsums;

    /* pick the view to reduce and (re)allocate the cached sums array */
    if (what == DA_ROW) {
        nvecs = mat->nrows;
        ptr   = mat->rowptr;
        val   = mat->rowval;

        if (mat->rsums)
            gk_free((void **)&mat->rsums, LTERM);
        vsums = mat->rsums = da_vsmalloc(nvecs, 0, "da_csr_ComputeSums: sums");
    }
    else if (what == DA_COL) {
        nvecs = mat->ncols;
        ptr   = mat->colptr;
        val   = mat->colval;

        if (mat->csums)
            gk_free((void **)&mat->csums, LTERM);
        vsums = mat->csums = da_vsmalloc(nvecs, 0, "da_csr_ComputeSums: sums");
    }
    else {
        gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
        return;
    }

    /* one reduction per row/column */
    #pragma omp parallel for if (ptr[nvecs] > DA_OMPMINOPS) schedule(static)
    for (i=0; i<nvecs; i++)
        vsums[i] = da_vsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns

    \param mat the matrix itself,
    \param what is either DA_ROW or DA_COL indicating which squared
           norms to compute.
*/
/**************************************************************************/
void da_csr_ComputeSquaredNorms(da_csr_t *mat, char what)
{
    ssize_t i;
    idx_t nvecs;
    ptr_t *ptr;
    val_t *val, *vnorms;

    /* pick the view to reduce and (re)allocate the cached norms array */
    if (what == DA_ROW) {
        nvecs = mat->nrows;
        ptr   = mat->rowptr;
        val   = mat->rowval;

        if (mat->rnorms)
            gk_free((void **)&mat->rnorms, LTERM);
        vnorms = mat->rnorms = da_vsmalloc(nvecs, 0, "da_csr_ComputeSums: norms");
    }
    else if (what == DA_COL) {
        nvecs = mat->ncols;
        ptr   = mat->colptr;
        val   = mat->colval;

        if (mat->cnorms)
            gk_free((void **)&mat->cnorms, LTERM);
        vnorms = mat->cnorms = da_vsmalloc(nvecs, 0, "da_csr_ComputeSums: norms");
    }
    else {
        gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
        return;
    }

    /* squared 2-norm = dot product of the vector with itself */
    #pragma omp parallel for if (ptr[nvecs] > DA_OMPMINOPS) schedule(static)
    for (i=0; i<nvecs; i++)
        vnorms[i] = da_vdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the similarity between two rows/columns

    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either DA_ROW or DA_COL indicating the type of objects
           between the similarity will be computed,
    \param simtype is the type of similarity and is one of DA_SIM_COS,
           DA_SIM_JAC, DA_SIM_MIN, DA_SIM_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
val_t da_csr_ComputeSimilarity(da_csr_t *mat, idx_t i1, idx_t i2, char what, char simtype)
{
    idx_t nind1, nind2;
    idx_t *ind1, *ind2;
    val_t *val1, *val2, stat1, stat2, sim;

    /* locate the two (sorted) sparse vectors */
    switch (what) {
        case DA_ROW:
            if (!mat->rowptr)
                gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
            nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
            nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
            ind1  = mat->rowind + mat->rowptr[i1];
            ind2  = mat->rowind + mat->rowptr[i2];
            val1  = mat->rowval + mat->rowptr[i1];
            val2  = mat->rowval + mat->rowptr[i2];
            break;

        case DA_COL:
            if (!mat->colptr)
                gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
            nind1 = mat->colptr[i1+1]-mat->colptr[i1];
            nind2 = mat->colptr[i2+1]-mat->colptr[i2];
            ind1  = mat->colind + mat->colptr[i1];
            ind2  = mat->colind + mat->colptr[i2];
            val1  = mat->colval + mat->colptr[i1];
            val2  = mat->colval + mat->colptr[i2];
            break;

        default:
            gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
            return 0.0;
    }

    /* BUGFIX (all three merge loops below): the loop condition was
       `i1<nind1 && i2<nind2`, which made the `i1 == nind1` / `i2 == nind2`
       tail branches unreachable and silently dropped the unmatched tail of
       the longer vector from stat1/stat2, overestimating the similarity.
       The tail branches clearly intend a full merge, i.e. `||`. */
    switch (simtype) {
        case DA_SIM_COS:
        case DA_SIM_JAC:
            /* sim = dot product; stat1/stat2 = squared norms */
            sim = stat1 = stat2 = 0.0;
            i1 = i2 = 0;
            while (i1 < nind1 || i2 < nind2) {
                if (i1 == nind1) {
                    stat2 += val2[i2]*val2[i2];
                    i2++;
                }
                else if (i2 == nind2) {
                    stat1 += val1[i1]*val1[i1];
                    i1++;
                }
                else if (ind1[i1] < ind2[i2]) {
                    stat1 += val1[i1]*val1[i1];
                    i1++;
                }
                else if (ind1[i1] > ind2[i2]) {
                    stat2 += val2[i2]*val2[i2];
                    i2++;
                }
                else {
                    sim   += val1[i1]*val2[i2];
                    stat1 += val1[i1]*val1[i1];
                    stat2 += val2[i2]*val2[i2];
                    i1++;
                    i2++;
                }
            }
            if (simtype == DA_SIM_COS)
                sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
            else /* extended Jaccard */
                sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
            break;

        case DA_SIM_MIN:
            /* sim = sum of elementwise minima; stat1/stat2 = 1-norm sums */
            sim = stat1 = stat2 = 0.0;
            i1 = i2 = 0;
            while (i1 < nind1 || i2 < nind2) {
                if (i1 == nind1) {
                    stat2 += val2[i2];
                    i2++;
                }
                else if (i2 == nind2) {
                    stat1 += val1[i1];
                    i1++;
                }
                else if (ind1[i1] < ind2[i2]) {
                    stat1 += val1[i1];
                    i1++;
                }
                else if (ind1[i1] > ind2[i2]) {
                    stat2 += val2[i2];
                    i2++;
                }
                else {
                    sim   += gk_min(val1[i1],val2[i2]);
                    stat1 += val1[i1];
                    stat2 += val2[i2];
                    i1++;
                    i2++;
                }
            }
            sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
            break;

        case DA_SIM_AMIN:
            /* asymmetric min: normalized by the first vector's sum only */
            sim = stat1 = stat2 = 0.0;
            i1 = i2 = 0;
            while (i1 < nind1 || i2 < nind2) {
                if (i1 == nind1) {
                    stat2 += val2[i2];
                    i2++;
                }
                else if (i2 == nind2) {
                    stat1 += val1[i1];
                    i1++;
                }
                else if (ind1[i1] < ind2[i2]) {
                    stat1 += val1[i1];
                    i1++;
                }
                else if (ind1[i1] > ind2[i2]) {
                    stat2 += val2[i2];
                    i2++;
                }
                else {
                    sim   += gk_min(val1[i1],val2[i2]);
                    stat1 += val1[i1];
                    stat2 += val2[i2];
                    i1++;
                    i2++;
                }
            }
            sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
            break;

        default:
            gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
            return -1;
    }

    return sim;
}


/*************************************************************************/
/*! Check of two matrices are equal. Note that one of the matrices may have
    an additional index created to facilitate the comparison.

    \param a is the matrix to be compared.
    \param b is the matrix to be compared against.
    \param p is the precision to be used in the comparison.
    \returns 1 if matrices have the same elements, 0 otherwise.
*/ /**************************************************************************/ char da_csr_Compare(da_csr_t *a, da_csr_t *b, double p) { if(!(a && b)) return 0; if(a->ncols != b->ncols || a->nrows != b->nrows) return 0; ptr_t nnz = 0; assert((a->rowptr && b->rowptr) || (a->colptr && b->colptr)); if(a->rowptr && b->rowptr){ if(a->rowptr[a->nrows] != b->rowptr[b->nrows]) return 0; nnz = a->rowptr[a->nrows]; if(!da_parreq(a->nrows+1, a->rowptr, b->rowptr)) return 0; if(!da_iarreq(nnz, a->rowind, b->rowind)) return 0; if(a->rowval && b->rowval && !da_varreq_p(nnz, a->rowval, b->rowval, p)) return 0; else if((a->rowval && !b->rowval) || (!a->rowval && b->rowval)) return 0; } else if(a->colptr && b->colptr){ if(a->colptr[a->ncols] != b->colptr[b->ncols]) return 0; nnz = a->colptr[a->ncols]; if(!da_parreq(a->ncols+1, a->colptr, b->colptr)) return 0; if(!da_iarreq(nnz, a->colind, b->colind)) return 0; if(a->rowval && b->rowval && !da_varreq_p(nnz, a->colval, b->colval, p)) return 0; else if((a->rowval && !b->rowval) || (!a->rowval && b->rowval)) return 0; } else { return 0; } return 1; } /** * Increase the space needed for storing nnzs in a csr matrix * \param mat The matrix to have its nnz space re-allocated * \param newNnz The new size of the row/col ind/val arrays */ void da_csr_Grow(da_csr_t *mat, ptr_t newNnz) { if(mat->rowind){ mat->rowind = da_irealloc(mat->rowind, newNnz, "da_csr_matrixNNzRealloc: mat->rowind"); if(mat->rowind == NULL) gk_errexit(SIGERR, "da_csr_matrixNNzRealloc: Could not reallocate mat->rowind size %zu.\n", newNnz); } if(mat->rowval){ mat->rowval = da_vrealloc(mat->rowval, newNnz, "da_csr_matrixNNzRealloc: mat->rowval"); if(mat->rowval == NULL) gk_errexit(SIGERR, "da_csr_matrixNNzRealloc: Could not reallocate mat->rowval size %zu.\n", newNnz); } if(mat->colind){ mat->colind = da_irealloc(mat->colind, newNnz, "da_csr_matrixNNzRealloc: mat->colind"); if(mat->colind == NULL) gk_errexit(SIGERR, "da_csr_matrixNNzRealloc: Could not reallocate 
mat->colind size %zu.\n", newNnz); } if(mat->colval){ mat->colval = da_vrealloc(mat->colval, newNnz, "da_csr_matrixNNzRealloc: mat->colval"); if(mat->colval == NULL) gk_errexit(SIGERR, "da_csr_matrixNNzRealloc: Could not reallocate mat->colval size %zu.\n", newNnz); } } /** * Compute a partial dot product of two vectors */ val_t da_csr_partialDotProduct(const ptr_t *rowptr, const ptr_t *endptr, const idx_t *rowind, const val_t *rowval, idx_t a, idx_t b) { val_t sum = 0.0; ptr_t aidx, bidx; if ( endptr[a] <= rowptr[a] || endptr[b] <= rowptr[b] ) { // one of the two is a null vector. return sum; } aidx = rowptr[a]; bidx = rowptr[b]; for ( ; aidx < endptr[a] && bidx < endptr[b]; ) { if ( rowind[aidx] < rowind[bidx] ) aidx++; else if ( rowind[bidx] < rowind[aidx] ) bidx++; else { sum += rowval[aidx] * rowval[bidx]; aidx++; bidx++; } } return sum; } /** * Compute the dot product of two vectors */ val_t da_csr_dotProduct(const ptr_t *rowptr, const idx_t *rowind, const val_t *rowval, idx_t a, idx_t b) { val_t sum = 0.0; ptr_t aidx, bidx; if ( rowptr[a+1] <= rowptr[a] || rowptr[b+1] <= rowptr[b] ) { // one of the two is a null vector. return sum; } for (aidx = rowptr[a], bidx = rowptr[b]; aidx < rowptr[a+1] && bidx < rowptr[b+1]; ) { if ( rowind[aidx] < rowind[bidx] ) aidx++; else if ( rowind[bidx] < rowind[aidx] ) bidx++; else { sum += rowval[aidx] * rowval[bidx]; aidx++; bidx++; } } return sum; } /** * Find similar rows in the matrix */ idx_t da_csr_GetSimilarSmallerRows(da_csr_t *mat, idx_t rid, char noSelfSim, idx_t nqterms, idx_t *qind, val_t *qval, char simtype, idx_t nsim, float minsim, da_ivkv_t *hits, idx_t *i_marker, da_ivkv_t *i_cand) { ssize_t i, ii, j, k; idx_t nrows, ncols, ncand; ptr_t *colptr; idx_t *colind, *marker; val_t *colval, *rnorms, mynorm, *rsums, mysum; da_ivkv_t *cand; if (nqterms == 0) return 0; nrows = mat->nrows; ncols = mat->ncols; colptr = mat->colptr; colind = mat->colind; colval = mat->colval; marker = (i_marker ? 
i_marker : da_ismalloc(nrows, -1, "da_csr_GetSimilarSmallerRows: marker")); cand = (i_cand ? i_cand : da_ivkvmalloc(nrows, "da_csr_GetSimilarSmallerRows: cand")); switch (simtype) { case DA_SIM_COS: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if(k > rid || (noSelfSim && k == rid)) continue; if (marker[k] == -1) { cand[ncand].key = k; cand[ncand].val = 0; marker[k] = ncand++; } cand[marker[k]].val += colval[j]*qval[ii]; } } } break; case DA_SIM_JAC: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if(k > rid || (noSelfSim && k == rid)) continue; if (marker[k] == -1) { cand[ncand].key = k; cand[ncand].val = 0; marker[k] = ncand++; } cand[marker[k]].val += colval[j]*qval[ii]; } } } rnorms = mat->rnorms; mynorm = da_vdot(nqterms, qval, 1, qval, 1); for (i=0; i<ncand; i++) cand[i].val = cand[i].val/(rnorms[cand[i].key]+mynorm-cand[i].val); break; case DA_SIM_MIN: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if(k > rid || (noSelfSim && k == rid)) continue; if (marker[k] == -1) { cand[ncand].key = k; cand[ncand].val = 0; marker[k] = ncand++; } cand[marker[k]].val += gk_min(colval[j], qval[ii]); } } } rsums = mat->rsums; mysum = da_vsum(nqterms, qval, 1); for (i=0; i<ncand; i++) cand[i].val = cand[i].val/(rsums[cand[i].key]+mysum-cand[i].key); break; /* Assymetric MIN similarity */ case DA_SIM_AMIN: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if(k > rid || (noSelfSim && k == rid)) continue; if (marker[k] == -1) { cand[ncand].key = k; cand[ncand].val = 0; marker[k] = ncand++; } cand[marker[k]].val += gk_min(colval[j], qval[ii]); } } } mysum = da_vsum(nqterms, qval, 1); for (i=0; i<ncand; i++) cand[i].val = cand[i].val/mysum; break; default: 
gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype); return -1; } /* go and prune the hits that are bellow minsim */ for (j=0, i=0; i<ncand; i++) { marker[cand[i].key] = -1; if (cand[i].val >= minsim) cand[j++] = cand[i]; } ncand = j; if (nsim == -1 || nsim >= ncand) { nsim = ncand; } else { nsim = gk_min(nsim, ncand); da_ivkvkselectd(ncand, nsim, cand); da_ivkvsortd(nsim, cand); } da_ivkvcopy(nsim, cand, hits); if (i_marker == NULL) gk_free((void **)&marker, LTERM); if (i_cand == NULL) gk_free((void **)&cand, LTERM); return nsim; } /*************************************************************************/ /*! This function gets some basic statistics about the file. Same as GK's version * but handles comment lines. \param fname is the name of the file \param r_nlines is the number of lines in the file. If it is NULL, this information is not returned. \param r_ntokens is the number of tokens in the file. If it is NULL, this information is not returned. \param r_max_nlntokens is the maximum number of tokens in any line in the file. If it is NULL this information is not returned. \param r_nbytes is the number of bytes in the file. If it is NULL, this information is not returned. 
*/
/*************************************************************************/
void da_getfilestats(char *fname, size_t *r_nlines, size_t *r_ntokens,
        size_t *r_max_nlntokens, size_t *r_nbytes)
{
  size_t nlines=0, ntokens=0, max_nlntokens=0, nbytes=0, oldntokens=0, nread;
  int32_t intoken=0, lineType=0; //lineType 0-not started, 1-started ok line, 2-comment line
  char buffer[2049], *cptr;
  FILE *fpin;

  fpin = gk_fopen(fname, "r", "da_getfilestats");

  // Scan the file in 2048-byte chunks; intoken/lineType persist across
  // chunk boundaries so tokens/lines split by a read are counted once.
  while (!feof(fpin)) {
    nread = fread(buffer, sizeof(char), 2048, fpin);
    nbytes += nread;
    buffer[nread] = '\0'; /* There is space for this one */
    for (cptr=buffer; *cptr!='\0'; cptr++) {
      if (*cptr == '%' && lineType == 0){
        // '%' before any token on the line marks a comment line; its
        // characters are excluded from the line/token counts below
        lineType = 2;
      }
      else if (*cptr == '\n') {
        if(lineType != 2){
          // lineType is 0 or 1 here, so completely empty lines add 0
          nlines += lineType;
          ntokens += intoken;
        }
        intoken = 0;
        lineType = 0;
        // track the largest per-line token count (diff since last newline)
        if (max_nlntokens < ntokens-oldntokens)
          max_nlntokens = ntokens-oldntokens;
        oldntokens = ntokens;
      }
      else if (*cptr == ' ' || *cptr == '\t') {
        // whitespace ends a token (if one was open)
        ntokens += intoken;
        intoken = 0;
      }
      else if(lineType != 2){
        // any other character starts/continues a token on a normal line
        intoken = 1;
        lineType = 1;
      }
    }
  }
  // account for a final token/line not terminated by '\n' at EOF
  ntokens += intoken;
  if (max_nlntokens < ntokens-oldntokens)
    max_nlntokens = ntokens-oldntokens;

  gk_fclose(fpin);

  // each statistic is returned only if the caller supplied a slot for it
  if (r_nlines != NULL)
    *r_nlines = nlines;
  if (r_ntokens != NULL)
    *r_ntokens = ntokens;
  if (r_max_nlntokens != NULL)
    *r_max_nlntokens = max_nlntokens;
  if (r_nbytes != NULL)
    *r_nbytes = nbytes;
}
GB_emult_04.c
//------------------------------------------------------------------------------ // GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity // structure as M, and its pattern is a subset of M. // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse bitmap bitmap (method: 04) // sparse sparse bitmap full (method: 04) // sparse sparse full bitmap (method: 04) // sparse sparse full full (method: 04) // TODO: this function can also do eWiseAdd, just as easily. // Just change the "&&" to "||" in the GB_emult_04_template. // If A and B are both full, eadd and emult are identical. 
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif

// free the slicing/counting workspace (but keep the result C)
#define GB_FREE_WORKSPACE                       \
{                                               \
    GB_WERK_POP (Work, int64_t) ;               \
    GB_WERK_POP (M_ek_slicing, int64_t) ;       \
}

// free everything, including the partially-built C (error path)
#define GB_FREE_ALL                             \
{                                               \
    GB_FREE_WORKSPACE ;                         \
    GB_phbix_free (C) ;                         \
}

GrB_Info GB_emult_04        // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // sparse/hyper, not NULL
    const bool Mask_struct, // if true, use only the structure of M
    bool *mask_applied,     // if true, the mask was applied
    const GrB_Matrix A,     // input A matrix (bitmap/full)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    const GrB_BinaryOp op,  // op to perform C = op (A,B)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;

    ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;

    // M may be jumbled, but must have no pending work of any other kind
    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    // C takes the sparsity structure of the mask M
    int C_sparsity = GB_sparsity (M) ;

    GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
        GB_sparsity_char (C_sparsity),
        GB_sparsity_char_matrix (M),
        GB_sparsity_char_matrix (A),
        GB_sparsity_char_matrix (B)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: all present entries count as true
    const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
    const int64_t vlen = M->vlen ;
    const int64_t vdim = M->vdim ;
    const int64_t nvec = M->nvec ;
    const int64_t mnz = GB_nnz (M) ;
    const size_t msize = M->type->size ;

    // bitmaps of A and B (NULL when the matrix is full)
    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, // sparse or hyper (same as M), existing header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, M->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the mask matrix M
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int M_ntasks, M_nthreads ;
    // defines kfirst_Mslice, klast_Mslice, pstart_Mslice used below
    GB_SLICE_MATRIX (M, 8, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    // three arrays of size M_ntasks, carved out of the single Work block
    Wfirst    = Work ;
    Wlast     = Work + M_ntasks ;
    Cp_kfirst = Work + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

    // TODO: if M is structural and A and B are both full, then C has exactly
    // the same pattern as M, the first phase can be skipped.

    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // count the entries in C(:,j)
            int64_t j = GBH (Mh, k) ;
            int64_t pstart = j * vlen ;     // start of A(:,j) and B(:,j)
            int64_t pM, pM_end ;
            GB_get_pA (&pM, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, vlen) ;
            int64_t cjnz = 0 ;
            for ( ; pM < pM_end ; pM++)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    // entry survives if present in both A and B
                    int64_t i = Mi [pM] ;
                    cjnz +=
                        (GBB (Ab, pstart + i)
                        &&  // TODO: for GB_add, use || instead
                        GBB (Bb, pstart + i)) ;
                }
            }
            // first/last vectors of a task may be shared with neighboring
            // tasks, so their counts are kept separately and merged later
            if (k == kfirst)
            {
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            {
                Wlast [tid] = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
    GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
        Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead

    if (GB_IS_HYPERSPARSE (M))
    {
        // copy M->h into C->h
        GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
    }

    C->nvec = nvec ;
    C->jumbled = M->jumbled ;   // C is jumbled if M is jumbled
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        #define GB_ISO_EMULT
        #include "GB_emult_04_template.c"

    }
    else
    {

        //----------------------------------------------------------------------
        // C is non-iso
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B,  \
                    Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                    op, false, &opcode, &xcode, &ycode, &zcode)
                && ccode == zcode)
            {
                // built-in operator matched: dispatch to a typed worker
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker
        //----------------------------------------------------------------------

        if (!done)
        {
            // fallback for user-defined, positional, or typecasting ops
            GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
            GB_ewise_generic (C, op, NULL, 0, 0, NULL, NULL, NULL,
                C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
                M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
                M, Mask_struct, false, A, B, Context) ;
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
    (*mask_applied) = true ;
    return (GrB_SUCCESS) ;
}
par_csr_matop.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /*-------------------------------------------------------------------------- * hypre_ParMatmul_RowSizes: * * Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed * so it can also be used for multiplication of Boolean matrices. * * Arrays computed: C_diag_i, C_offd_i. * * Arrays needed: (17, all HYPRE_Int*) * rownnz_A, * A_diag_i, A_diag_j, * A_offd_i, A_offd_j, * B_diag_i, B_diag_j, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j, * col_map_offd_B, col_map_offd_B, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j. * * Scalars computed: C_diag_size, C_offd_size. 
 *
 * Scalars needed:
 *    num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare,
 *    first_col_diag_B, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C
 *--------------------------------------------------------------------------*/

void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation  memory_location,
                               HYPRE_Int           **C_diag_i,
                               HYPRE_Int           **C_offd_i,
                               HYPRE_Int            *rownnz_A,
                               HYPRE_Int            *A_diag_i,
                               HYPRE_Int            *A_diag_j,
                               HYPRE_Int            *A_offd_i,
                               HYPRE_Int            *A_offd_j,
                               HYPRE_Int            *B_diag_i,
                               HYPRE_Int            *B_diag_j,
                               HYPRE_Int            *B_offd_i,
                               HYPRE_Int            *B_offd_j,
                               HYPRE_Int            *B_ext_diag_i,
                               HYPRE_Int            *B_ext_diag_j,
                               HYPRE_Int            *B_ext_offd_i,
                               HYPRE_Int            *B_ext_offd_j,
                               HYPRE_Int            *map_B_to_C,
                               HYPRE_Int            *C_diag_size,
                               HYPRE_Int            *C_offd_size,
                               HYPRE_Int             num_rownnz_A,
                               HYPRE_Int             num_rows_diag_A,
                               HYPRE_Int             num_cols_offd_A,
                               HYPRE_Int             allsquare,
                               HYPRE_Int             num_cols_diag_B,
                               HYPRE_Int             num_cols_offd_B,
                               HYPRE_Int             num_cols_offd_C )
{
   /* per-thread nnz totals, merged into global row pointers after the
      parallel counting pass */
   HYPRE_Int  *jj_count_diag_array;
   HYPRE_Int  *jj_count_offd_array;

   HYPRE_Int   start_indexing = 0; /* start indexing for C_data at 0 */
   HYPRE_Int   num_threads = hypre_NumThreads();

   *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
   *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);

   jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    *  Loop over rows of A
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int  *B_marker = NULL;
      HYPRE_Int   jj_row_begin_diag, jj_count_diag;
      HYPRE_Int   jj_row_begin_offd, jj_count_offd;
      HYPRE_Int   i1, ii1, i2, i3, jj2, jj3;
      HYPRE_Int   size, rest, num_threads;
      HYPRE_Int   ii, ns, ne;

      /* partition the (possibly compressed) row range [0,num_rownnz_A)
         into contiguous chunks [ns,ne) per thread; the first `rest`
         threads take one extra row */
      num_threads = hypre_NumActiveThreads();
      size = num_rownnz_A/num_threads;
      rest = num_rownnz_A - size*num_threads;

      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = start_indexing;
      jj_count_offd = start_indexing;

      /* B_marker records, per column of C, the position at which that
         column was last counted; comparing against jj_row_begin_* tells
         whether it was already seen in the current row */
      if (num_cols_diag_B || num_cols_offd_C)
      {
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C,
                                  HYPRE_MEMORY_HOST);
      }

      for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
      {
         B_marker[i1] = -1;
      }

      for (i1 = ns; i1 < ne; i1++)
      {
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if (rownnz_A)
         {
            /* i1 indexes the compressed list of nonempty rows */
            ii1 = rownnz_A[i1];
         }
         else
         {
            ii1 = i1;

            /*--------------------------------------------------------------------
             * Set marker for diagonal entry, C_{i1,i1} (for square matrices).
             *--------------------------------------------------------------------*/

            if (allsquare)
            {
               B_marker[i1] = jj_count_diag;
               jj_count_diag++;
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row ii1 of A_offd.
          *-----------------------------------------------------------------*/

         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];

               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/

               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{ii1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }

               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     jj_count_diag++;
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row ii1 of A_diag.
          *-----------------------------------------------------------------*/

         for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/

            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                *  Check B_marker to see that C_{ii1,i3} has not already
                *  been accounted for. If it has not, mark it and increment
                *  counter.
                *--------------------------------------------------------*/

               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  jj_count_diag++;
               }
            }

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_offd.
             *-----------------------------------------------------------*/

            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{ii1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
            }
         }

         /*--------------------------------------------------------------------
          * Set C_diag_i and C_offd_i for this row.
          *--------------------------------------------------------------------*/

         /* stores thread-local offsets; corrected to global values below */
         (*C_diag_i)[ii1] = jj_row_begin_diag;
         (*C_offd_i)[ii1] = jj_row_begin_offd;
      }

      jj_count_diag_array[ii] = jj_count_diag;
      jj_count_offd_array[ii] = jj_count_offd;

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Correct diag_i and offd_i - phase 1:
         shift each thread's local offsets by the totals of all lower-
         numbered threads (manual exclusive prefix sum); thread 0 instead
         fills in the grand totals at position num_rows_diag_A */
      if (ii)
      {
         jj_count_diag = jj_count_diag_array[0];
         jj_count_offd = jj_count_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj_count_diag += jj_count_diag_array[i1];
            jj_count_offd += jj_count_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            ii1 = rownnz_A ? rownnz_A[i1] : i1;
            (*C_diag_i)[ii1] += jj_count_diag;
            (*C_offd_i)[ii1] += jj_count_offd;
         }
      }
      else
      {
         (*C_diag_i)[num_rows_diag_A] = 0;
         (*C_offd_i)[num_rows_diag_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
            (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
         }
      }

      /* Correct diag_i and offd_i - phase 2:
         with a compressed rownnz_A only nonempty rows were written above;
         propagate the next nonempty row's offset back over the empty rows
         so that every empty row gets zero length */
      if (rownnz_A != NULL)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         for (i1 = ns; i1 < (ne-1); i1++)
         {
            for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1+1]; ii1++)
            {
               (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1+1]];
               (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1+1]];
            }
         }

         if (ii < (num_threads - 1))
         {
            for (ii1 = rownnz_A[ne-1] + 1; ii1 < rownnz_A[ne]; ii1++)
            {
               (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]];
               (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]];
            }
         }
         else
         {
            /* last thread: fill up to the end of the matrix */
            for (ii1 = rownnz_A[ne-1] + 1; ii1 < num_rows_diag_A; ii1++)
            {
               (*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A];
               (*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A];
            }
         }
      }
   } /* end parallel loop */

   *C_diag_size = (*C_diag_i)[num_rows_diag_A];
   *C_offd_size = (*C_offd_i)[num_rows_diag_A];

   hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);

   /* End of First Pass */
}

/*--------------------------------------------------------------------------
 * hypre_ParMatmul:
 *
 * Multiplies two ParCSRMatrices
A and B and returns the product in
 * ParCSRMatrix C.
 *
 * Note: C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix*
hypre_ParMatmul( hypre_ParCSRMatrix  *A,
                 hypre_ParCSRMatrix  *B )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif

   /* ParCSRMatrix A */
   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt     nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt     ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt    *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int        num_rownnz_A;
   HYPRE_Int       *rownnz_A = NULL;

   /* ParCSRMatrix B */
   HYPRE_BigInt     nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   HYPRE_BigInt     ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   HYPRE_BigInt     first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt    *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_BigInt     last_col_diag_B;

   /* A_diag */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int       *A_diag_ir = hypre_CSRMatrixRownnz(A_diag);
   HYPRE_Int        num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
   HYPRE_Int        num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);

   /* A_offd */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int       *A_offd_ir = hypre_CSRMatrixRownnz(A_offd);
   HYPRE_Int        num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
   HYPRE_Int        num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
   HYPRE_Int        num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   /* B_diag */
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex   *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int       *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int       *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Int        num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int        num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);

   /* B_offd */
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt    *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_Complex   *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int       *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int       *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Int        num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   /* ParCSRMatrix C */
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt       *col_map_offd_C;
   HYPRE_Int          *map_B_to_C = NULL;

   /* C_diag */
   /* NOTE(review): C_offd_size is declared in this group and C_diag_size in
    * the next one — kept as in the original source. */
   hypre_CSRMatrix *C_diag;
   HYPRE_Complex   *C_diag_data;
   HYPRE_Int       *C_diag_i;
   HYPRE_Int       *C_diag_j;
   HYPRE_Int        C_offd_size;
   HYPRE_Int        num_cols_offd_C = 0;

   /* C_offd */
   hypre_CSRMatrix *C_offd;
   HYPRE_Complex   *C_offd_data = NULL;
   HYPRE_Int       *C_offd_i = NULL;
   HYPRE_Int       *C_offd_j = NULL;
   HYPRE_Int        C_diag_size;

   /* Bs_ext: external rows of B received from neighbor processes */
   hypre_CSRMatrix *Bs_ext;
   HYPRE_Complex   *Bs_ext_data;
   HYPRE_Int       *Bs_ext_i;
   HYPRE_BigInt    *Bs_ext_j;

   /* Bs_ext split into local-diag-range and off-diag-range parts */
   HYPRE_Complex   *B_ext_diag_data;
   HYPRE_Int       *B_ext_diag_i;
   HYPRE_Int       *B_ext_diag_j;
   HYPRE_Int        B_ext_diag_size;
   HYPRE_Complex   *B_ext_offd_data;
   HYPRE_Int       *B_ext_offd_i;
   HYPRE_Int       *B_ext_offd_j;
   HYPRE_BigInt    *B_big_offd_j = NULL;
   HYPRE_Int        B_ext_offd_size;

   HYPRE_Int        allsquare = 0;
   HYPRE_Int        num_procs;
   /* per-thread nnz counters used by the manual prefix sums below */
   HYPRE_Int       *my_diag_array;
   HYPRE_Int       *my_offd_array;
   HYPRE_Int        max_num_threads;

   HYPRE_Complex    zero = 0.0;

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

   if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
      /* NOTE(review): my_diag_array/my_offd_array are not freed on this
       * early-error path — TODO confirm intended. */
      return NULL;
   }

   /* if C=A*B is square globally and locally, then C_diag should be square also */
   if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B )
   {
      allsquare = 1;
   }

   /* Set rownnz of A: when both parts track nonzero rows, merge the two
    * ordered row lists; otherwise fall back to iterating over all rows. */
   if (num_rownnz_diag_A != num_rows_diag_A &&
       num_rownnz_offd_A != num_rows_offd_A )
   {
      hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir,
                               num_rownnz_offd_A, A_offd_ir,
                               &num_rownnz_A, &rownnz_A);
   }
   else
   {
      num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A);
   }

   /*-----------------------------------------------------------------------
    *  Extract B_ext, i.e. portion of B that is stored on neighbor procs
    *  and needed locally for matrix matrix product
    *-----------------------------------------------------------------------*/

   hypre_MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif

   if (num_procs > 1)
   {
      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings within
       * hypre_ParCSRMatrixExtractBExt
       *--------------------------------------------------------------------*/
      Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1);
      Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
      Bs_ext_i    = hypre_CSRMatrixI(Bs_ext);
      Bs_ext_j    = hypre_CSRMatrixBigJ(Bs_ext);
   }

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   B_ext_diag_size = 0;
   B_ext_offd_size = 0;
   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Concurrent variant: collect off-diag global columns in a lock-free set,
    * then build col_map_offd_C and its inverse map. */
   hypre_UnorderedBigIntSet set;

#pragma omp parallel
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      /* Manual block partition of [0, num_cols_offd_A) over the threads;
       * the first `rest` threads get one extra element. */
      size = num_cols_offd_A/num_threads;
      rest = num_cols_offd_A - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* Pass 1: count diag-range vs offd-range entries per external row. */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i = ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B ||
                Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#pragma omp barrier

      /* Exclusive prefix sum across threads: thread 0 computes totals and
       * allocates; the others offset their local row pointers. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
         B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads());
      }

#pragma omp barrier

      /* Pass 2: fill the split arrays; off-diag global columns also go into
       * the concurrent set for later renumbering. */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B ||
                Bs_ext_j[j] > last_col_diag_B)
            {
               hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]);
               B_big_offd_j[cnt_offd] = Bs_ext_j[j];
               //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* B's own off-diag columns are part of C's off-diag column set too. */
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
      for (i = i_begin; i < i_end; i++)
      {
         hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]);
      }
   } /* omp parallel */

   col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C);
   hypre_UnorderedBigIntSetDestroy(&set);
   hypre_UnorderedBigIntMap col_map_offd_C_inverse;
   hypre_big_sort_and_create_inverse_map(col_map_offd_C,
                                         num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse);

   /* Renumber the stored global off-diag columns to local C indices. */
   HYPRE_Int i, j;
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_offd_A; i++)
   {
      for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
      {
         //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
         B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);
      }
   }

   if (num_cols_offd_C)
   {
      hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse);
   }
   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      /* map_B_to_C[k] = position of col_map_offd_B[k] inside col_map_offd_C;
       * both arrays are sorted, so each thread seeds `cnt` with a lower bound
       * and walks forward. */
      HYPRE_Int i;
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

#pragma omp parallel private(i)
      {
         HYPRE_Int i_begin, i_end;
         hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);

         HYPRE_Int cnt;
         if (i_end > i_begin)
         {
            cnt = hypre_BigLowerBound(col_map_offd_B,
                                      col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B,
                                      col_map_offd_C[i_begin]) - col_map_offd_B;
         }

         for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
         {
            if (col_map_offd_C[i] == col_map_offd_B[cnt])
            {
               map_B_to_C[cnt++] = i;
            }
         }
      }
   }
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Bs_ext);
      Bs_ext = NULL;
   }

#else /* !HYPRE_CONCURRENT_HOPSCOTCH */

   /* Sequential-set variant: gather candidate off-diag columns into `temp`
    * (shared, allocated by thread 0), sort, deduplicate, then renumber via
    * binary search. */
   HYPRE_BigInt *temp;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;

      HYPRE_Int num_threads = hypre_NumActiveThreads();

      /* Same manual block partition of [0, num_cols_offd_A) as above. */
      size = num_cols_offd_A/num_threads;
      rest = num_cols_offd_A - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* Pass 1: per-thread counts of diag-range vs offd-range entries. */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i = ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B ||
                Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Exclusive prefix sum across threads; thread 0 allocates the shared
       * output arrays and the scratch array `temp`. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
         B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Pass 2: fill the split arrays; record off-diag global columns. */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B ||
                Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_big_offd_j[cnt_offd] = Bs_ext_j[j];
               //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Thread 0: append B's own off-diag columns, sort + dedupe to build
       * col_map_offd_C. */
      if (ii == 0)
      {
         HYPRE_Int cnt;

         if (num_procs > 1)
         {
            hypre_CSRMatrixDestroy(Bs_ext);
            Bs_ext = NULL;
         }

         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i = 0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }

            if (cnt)
            {
               HYPRE_BigInt value;

               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               value = temp[0];
               for (i = 1; i < cnt; i++)
               {
                  /* keep only strictly increasing values (deduplicate) */
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }

            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }

            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Renumber stored global columns to local C off-diag indices. */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j],
            //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j],
                                                    num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      /* Sequential version of the B->C off-diag column mapping. */
      HYPRE_Int i, cnt;
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i = 0; i < num_cols_offd_C; i++)
      {
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
      }
   }
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif

   /* First pass (symbolic): compute row pointers and nnz of C. */
   hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i,
                            rownnz_A, A_diag_i, A_diag_j,
                            A_offd_i, A_offd_j,
                            B_diag_i, B_diag_j,
                            B_offd_i, B_offd_j,
                            B_ext_diag_i, B_ext_diag_j,
                            B_ext_offd_i, B_ext_offd_j, map_B_to_C,
                            &C_diag_size, &C_offd_size,
                            num_rownnz_A, num_rows_diag_A, num_cols_offd_A,
                            allsquare, num_cols_diag_B, num_cols_offd_B,
                            num_cols_offd_C);

   /*-----------------------------------------------------------------------
    *  Allocate C_diag_data and C_diag_j arrays.
    *  Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/

   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
   C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C);
   C_diag_j    = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C);
   if (C_offd_size)
   {
      C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C);
      C_offd_j    = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C);
   }

   /*-----------------------------------------------------------------------
    *  Second Pass: Fill in C_diag_data and C_diag_j.
    *  Second Pass: Fill in C_offd_data and C_offd_j.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      /* B_marker[col] remembers where column `col` of the current C row was
       * created; values below the row's start position mean "not yet seen". */
      HYPRE_Int     *B_marker = NULL;
      HYPRE_Int      ns, ne, size, rest, ii;
      HYPRE_Int      i1, ii1, i2, i3, jj2, jj3;
      HYPRE_Int      jj_row_begin_diag, jj_count_diag;
      HYPRE_Int      jj_row_begin_offd, jj_count_offd;
      HYPRE_Int      num_threads;
      HYPRE_Complex  a_entry; /*, a_b_product;*/

      num_threads = hypre_NumActiveThreads();
      size = num_rownnz_A/num_threads;
      rest = num_rownnz_A - size*num_threads;

      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = C_diag_i[ns];
      jj_count_offd = C_offd_i[ns];

      if (num_cols_diag_B || num_cols_offd_C)
      {
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST);
         for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
         {
            B_marker[i1] = -1;
         }
      }

      /*-----------------------------------------------------------------------
       *  Loop over interior c-points.
       *-----------------------------------------------------------------------*/

      for (i1 = ns; i1 < ne; i1++)
      {
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if (rownnz_A)
         {
            ii1 = rownnz_A[i1];
         }
         else
         {
            ii1 = i1;

            /*--------------------------------------------------------------------
             *  Create diagonal entry, C_{i1,i1}
             *--------------------------------------------------------------------*/

            if (allsquare)
            {
               B_marker[i1] = jj_count_diag;
               C_diag_data[jj_count_diag] = zero;
               C_diag_j[jj_count_diag] = i1;
               jj_count_diag++;
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/

         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];
               a_entry = A_offd_data[jj2];

               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/

               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{ii1,i3} has not already
                   *  been accounted for. If it has not, create a new entry.
                   *  If it has, add new contribution.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3];
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3];
                     C_diag_j[jj_count_diag] = i3;
                     jj_count_diag++;
                  }
                  else
                  {
                     C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3];
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row ii1 of A_diag.
          *-----------------------------------------------------------------*/

         for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            a_entry = A_diag_data[jj2];

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/

            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                *  Check B_marker to see that C_{ii1,i3} has not already
                *  been accounted for. If it has not, create a new entry.
                *  If it has, add new contribution.
                *--------------------------------------------------------*/

               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3];
                  C_diag_j[jj_count_diag] = i3;
                  jj_count_diag++;
               }
               else
               {
                  C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3];
               }
            }
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{ii1,i3} has not already
                   *  been accounted for. If it has not, create a new entry.
                   *  If it has, add new contribution.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3];
                  }
               }
            }
         }
      }

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B,
                                row_starts_A, col_starts_B,
                                num_cols_offd_C, C_diag_size, C_offd_size);

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
   hypre_ParCSRMatrixSetColStartsOwner(C, 0);

   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_data;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;
   hypre_CSRMatrixSetRownnz(C_diag);

   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_ParCSRMatrixOffd(C) = C_offd;
   if (num_cols_offd_C)
   {
      hypre_CSRMatrixData(C_offd) = C_offd_data;
      hypre_CSRMatrixJ(C_offd) = C_offd_j;
      hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
   }
   hypre_CSRMatrixSetRownnz(C_offd);

   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

   /*-----------------------------------------------------------------------
    *  Free various arrays
    *-----------------------------------------------------------------------*/

   hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
   if (B_ext_diag_size)
   {
      hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
   if (B_ext_offd_size)
   {
      hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif

   return C;
}

/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt
   but the code was removed so it can be used for a corresponding function
   for Boolean matrices

   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing B, they should be destroyed (including
   send_data contained in the comm_handle). */

void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
   HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
   // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix
   // other interpolation: skip_fine = 0, skip_same_sign = 0
   )
{
   hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   HYPRE_Int *B_int_i;
   HYPRE_BigInt *B_int_j;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt * B_ext_j;
   HYPRE_Complex * B_ext_data;
   HYPRE_Complex * B_int_data;
   HYPRE_BigInt * B_int_row_map;
   HYPRE_BigInt * B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i, j, k;
   HYPRE_Int start_index;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   HYPRE_BigInt first_row_index = row_starts[0];

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if ( num_rows_B_ext < 0 )
   {  /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if ( data ) *pB_ext_data = NULL;
      if ( find_row_map ) *pB_ext_row_map = NULL;
      *num_nonzeros = 0;
      return;
   };
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if ( find_row_map )
   {
      B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   };

   /*--------------------------------------------------------------------------
    * generate B_int_i through adding number of row-elements of offd and diag
    * for corresponding rows. B_int_i[j+1] contains the number of elements of
    * a row j (which is determined through send_map_elmts)
    *--------------------------------------------------------------------------*/

   jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts[0] = B_int_i[0] = 0;

   /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
   {
      /*HYPRE_Int counts[num_sends];*/
      HYPRE_Int *counts;
      counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);

      /* Phase 1: count how many entries each sent row contributes, honoring
       * the skip_fine / skip_same_sign filters. */
      for (i=0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = 0;
         if (skip_fine && skip_same_sign)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               /* sign of the diagonal entry decides which off-diagonal
                * signs are kept */
               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] < 0) len++;
                  }
               }
               else
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] > 0) len++;
                  }
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else if (skip_fine)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0) len++;
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else
         {
            /* no filtering: full row length (diag + offd) */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
               len += offd_i[jrow + 1] - offd_i[jrow];
               B_int_i[j + 1] = len;
               count += len;
            }
         }

         if (find_row_map)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
            }
         }

         counts[i] = count;
      }

      hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
      {
         for (i = 1; i < num_sends; i++)
         {
            jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
         }

         /*--------------------------------------------------------------------------
          * initialize communication
          *--------------------------------------------------------------------------*/

         comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,
                                                    &B_int_i[1],&(B_ext_i[1]) );
         if ( find_row_map )
         {
            /* scatter/gather B_int row numbers to form array of B_ext row numbers */
            row_map_comm_handle = hypre_ParCSRCommHandleCreate
               (21,comm_pkg, B_int_row_map, B_ext_row_map );
         }

         B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Phase 2: pack the (filtered) column indices — and values when
       * data != 0 — into the send buffers, using the same filters so the
       * offsets computed in phase 1 line up. */
      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

         if (data)
         {
            if (skip_same_sign && skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  /*HYPRE_Int count_begin = count;*/

                  if (diag_data[diag_i[jrow]] >= 0)
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] < 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
                  else
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] > 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     B_int_data[count] = diag_data[k];
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     B_int_data[count] = offd_data[k];
                     count++;
                  }
               }
            }
         } // data
         else
         {
            if (skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
                  {
                     if (CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (CF_marker_offd[offd_j[k]] >= 0)
                     {
                        B_int_j[count] = col_map_offd[offd_j[k]];
                        count++;
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         } // !data
      } /* for each send target */

      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   } /* omp parallel. JSP: this takes most of time in this function */

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   /* Temporary comm package reusing the send/recv proc lists but with
    * per-nonzero (rather than per-row) start offsets. */
   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * after communication exchange B_ext_i[j+1] contains the number of elements
    * of a row j !
    * evaluate B_ext_i and compute *num_nonzeros for B_ext
    *--------------------------------------------------------------------------*/

   for (i = 0; i < num_recvs; i++)
   {
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      {
         B_ext_i[j+1] += B_ext_i[j];
      }
   }

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   }

   for (i = 0; i < num_recvs; i++)
   {
      start_index = B_ext_i[recv_vec_starts[i]];
      *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   /* Start (but do not finish) the index/data exchanges — the caller owns
    * the returned handles and must destroy them before reading B_ext. */
   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data,
                                                       B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);

   /* end generic part */
}

/* Non-overlapping wrapper: runs the _Overlap version and immediately waits
 * on (destroys) both communication handles, freeing their send buffers. */
void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data )
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &comm_handle_idx, &comm_handle_data,
      NULL, NULL,
      0, 0);

   /* NOTE(review): send_data actually holds HYPRE_BigInt column indices;
    * the HYPRE_Int* cast is only used to free the buffer — confirm. */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as CSRMatrix.
 *--------------------------------------------------------------------------*/

/* Overlapping variant: gathers B's rows referenced by A's off-diagonal
 * columns into a CSR matrix, returning the in-flight communication handles
 * so the caller can overlap other work; destroy the handles before using
 * the returned matrix. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
                                       hypre_ParCSRMatrix *A,
                                       HYPRE_Int data,
                                       hypre_ParCSRCommHandle **comm_handle_idx,
                                       hypre_ParCSRCommHandle **comm_handle_data,
                                       HYPRE_Int *CF_marker,
                                       HYPRE_Int *CF_marker_offd,
                                       HYPRE_Int skip_fine,
                                       HYPRE_Int skip_same_sign )
{
   MPI_Comm     comm = hypre_ParCSRMatrixComm(B);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);

   /* communication pattern comes from A (its off-diag columns name the
    * rows of B that must be fetched) */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int  num_recvs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int  num_sends;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int  *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int  *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);

   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int  *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int  *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int num_cols_B, num_nonzeros;
   HYPRE_Int num_rows_B_ext;

   hypre_CSRMatrix *B_ext;

   HYPRE_Int     *B_ext_i;
   HYPRE_BigInt  *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_BigInt  *idummy;  /* row map not requested (find_row_map = 0) */

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   num_rows_B_ext = recv_vec_starts[num_recvs];

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
      ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
        &num_nonzeros,
        data, 0, comm, comm_pkg,
        num_cols_B, num_recvs, num_sends,
        first_col_diag, B->row_starts,
        recv_vec_starts, send_map_starts, send_map_elmts,
        diag_i, diag_j, offd_i, offd_j, col_map_offd,
        diag_data, offd_data,
        comm_handle_idx, comm_handle_data,
        CF_marker, CF_marker_offd,
        skip_fine, skip_same_sign
      );

   /* Wrap the received arrays in a host CSR matrix (big-J column indices
    * are global). */
   B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros);
   hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_ext) = B_ext_i;
   hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
   if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;

   return B_ext;
}

/* Non-overlapping entry point: fetch external rows of B needed by A and
 * return them as a completed CSR matrix. The legacy overlap-based path is
 * compiled out (#if 0) in favor of the GetExternalRows Init/Wait pair. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int want_data )
{
#if 0
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx,
                                                                  &comm_handle_data, NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (want_data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
#else
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   hypre_CSRMatrix *B_ext;
   void            *request;

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsInit(B,
                                   hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                   hypre_ParCSRMatrixColMapOffd(A),
                                   hypre_ParCSRMatrixCommPkg(A),
                                   want_data,
                                   &request);

   B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif

   return B_ext;
}
/*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_BigInt value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_BigInt first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_BigInt *AT_big_j = NULL; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_BigInt *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_BigInt *col_map_offd_AT; HYPRE_BigInt *row_starts_AT; HYPRE_BigInt *col_starts_AT; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; HYPRE_MemoryLocation memory_location = 
hypre_ParCSRMatrixMemoryLocation(A); /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) { AT_tmp_data = hypre_CSRMatrixData(AT_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (AT_tmp_i[num_cols_offd]) { AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST); } for (i = 0; i < AT_tmp_i[num_cols_offd]; i++) { //AT_tmp_j[i] += first_row_index; AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index; } for (i = 0; i < num_cols_offd; i++) { AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i]; } comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = send_map_starts[0]; for (i = 0; i < num_sends; i++) { tmp_send_map_starts[i+1] = tmp_send_map_starts[i]; for (j = send_map_starts[i]; j < 
send_map_starts[i+1]; j++) { tmp_send_map_starts[i+1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j]; } } for (i = 0; i < num_cols; i++) { AT_offd_i[i+1] += AT_offd_i[i]; } tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i]; for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { tmp_recv_vec_starts[i+1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location); AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST); if (data) { AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location); } } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i = 0; i < num_sends; 
i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k = 0; k < AT_buf_i[j]; k++) { if (data) { AT_offd_data[index] = AT_buf_data[counter]; } AT_big_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i = num_cols; i > 0; i--) { AT_offd_i[i] = AT_offd_i[i-1]; } AT_offd_i[0] = 0; if (counter) { hypre_BigQsort0(AT_buf_j,0,counter-1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i = 1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) { col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); } else { col_map_offd_AT = NULL; } for (i = 0; i < num_cols_offd_AT; i++) { col_map_offd_AT[i] = AT_buf_j[i]; } hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST); hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST); if (data) { hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < counter; i++) { AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT); } hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); } AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter); hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location; hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { row_starts_AT[i] = col_starts[i]; } if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { col_starts_AT[i] = row_starts[i]; } } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT ); local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT); AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); 
hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT; hypre_ParCSRMatrixColStarts(AT) = col_starts_AT; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixOwnsRowStarts(AT) = 1; hypre_ParCSRMatrixOwnsColStarts(AT) = 1; if (row_starts_AT == col_starts_AT) { hypre_ParCSRMatrixOwnsColStarts(AT) = 0; } hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1; *AT_ptr = AT; return ierr; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; 
hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; } hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); queue_head = 
0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, 
HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = 
hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* 
----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; 
i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume 
sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); 
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = 
hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, 
*offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if 
(exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = 
A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = A_diag_j[j]; 
/* (tail of hypre_ParCSRMatrixExtractRowSubmatrices, whose head is above)
   Finish the last diag row of A21, then build the off-diagonal part of A21
   from the rows of A that were NOT extracted, and return both blocks. */
diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } }
diag = hypre_ParCSRMatrixDiag(A21_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
/* allocate and fill the off-diagonal CSR arrays of A21 */
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
nnz = 0; row = 0; offd_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
   if (exp_indices[i] < 0) /* negative id => row was not extracted, goes to A21 */
   {
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         offd_j[nnz] = A_offd_j[j];
         /* NOTE(review): the value is read from A_diag_a while the loop walks
            the offd index range -- presumably the offd data array was meant;
            confirm against upstream hypre before changing. */
         offd_a[nnz++] = A_diag_a[j];
      }
      row++;
      offd_i[row] = nnz;
   }
}
offd = hypre_ParCSRMatrixOffd(A21_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_a;
/* -----------------------------------------------------
 * hand the matrices back to the caller and clean up
 * ----------------------------------------------------- */
(*submatrices)[0] = A11_csr;
(*submatrices)[1] = A21_csr;
hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * (sum over the local diag block plus the local offd block)
 * ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
   hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );
   return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixAminvDB
 * computes C = (A - inv(D)B) where D is a diagonal matrix
 * Note: Data structure of A is expected to be a subset of data structure of B!
*--------------------------------------------------------------------------*/

/* d holds the entries of the diagonal matrix D, one per local row.
   The result matrix is returned through C_ptr. */
HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B,
                                     HYPRE_Complex *d,
                                     hypre_ParCSRMatrix **C_ptr)
{
   /* communicator and CSR pieces of the two operands */
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_ParCSRMatrix *C = NULL;
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_Int num_sends_B, num_recvs_B;
   HYPRE_Int i, j, cnt;

   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_offd = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_offd_i = NULL;
   HYPRE_Int *C_offd_j = NULL;
   HYPRE_Complex *C_offd_data = NULL;

   HYPRE_Int num_procs, my_id;

   /* B's communication pattern, to be duplicated for C below */
   HYPRE_Int *recv_procs_B;
   HYPRE_Int *send_procs_B;
   HYPRE_Int *recv_vec_starts_B;
   HYPRE_Int *send_map_starts_B;
   HYPRE_Int *send_map_elmts_B;
   hypre_ParCSRCommPkg *comm_pkg_C;
   HYPRE_Int *recv_procs_C;
   HYPRE_Int *send_procs_C;
   HYPRE_Int *recv_vec_starts_C;
   HYPRE_Int *send_map_starts_C;
   HYPRE_Int *send_map_elmts_C;
   HYPRE_Int *map_to_B;
   /*HYPRE_Int *C_diag_array;
   HYPRE_Int *C_offd_array;*/
   HYPRE_Complex *D_tmp;
   HYPRE_Int size, rest, num_threads, ii;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for B, a CommPkg is generated
    *--------------------------------------------------------------------*/
   if (!comm_pkg_B)
   {
      hypre_MatvecCommPkgCreate(B);
      comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   }

   /* C starts as a structural clone of B (pattern of A is a subset of B's) */
   C = hypre_ParCSRMatrixClone(B, 0);
   /*hypre_ParCSRMatrixInitialize(C);*/

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   /* static partition of the local rows over the available threads */
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;

   D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);

   /* map_to_B[i]: position in col_map_offd_B of A's i-th offd column
      (relies on both column maps being sorted in ascending order) */
   if (num_cols_offd_A)
   {
      map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_cols_offd_A; i++)
      {
         while (col_map_offd_B[cnt] < col_map_offd_A[i])
         {
            cnt++;
         }
         map_to_B[i] = cnt;
         cnt++;
      }
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int *A_marker = NULL;
      HYPRE_Int ns, ne, A_col, num_cols, nmax;
      /* [ns, ne): contiguous row range handled by thread ii */
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* one marker array reused for the diag pass and the offd pass */
      nmax = hypre_max(num_rows, num_cols_offd_B);
      A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);

      for (i=0; i < num_rows; i++)
      {
         A_marker[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         D_tmp[i] = 1.0/d[i];
      }

      /* diag part: scatter A's row i into C, then subtract D_tmp[i]*B's row i;
         A_marker[col] remembers where column col landed in C's row */
      num_cols = C_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            A_col = A_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = A_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] += A_diag_data[j];
            }
         }
         for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
         {
            A_col = B_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
            }
         }
      }

      /* same two-operand pass for the off-diagonal parts
         (A's offd columns are remapped into B's offd column space) */
      for (i = 0; i < num_cols_offd_B; i++)
      {
         A_marker[i] = -1;
      }

      num_cols = C_offd_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            A_col = map_to_B[A_offd_j[j]];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = A_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] += A_offd_data[j];
            }
         }
         for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
         {
            A_col = B_offd_j[j];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
            }
         }
      }
      hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
   } /* end parallel region */

   /*for (i=0; i < num_cols_offd_B; i++)
      col_map_offd_C[i] = col_map_offd_B[i]; */

   /* C communicates exactly like B: deep-copy B's comm package */
   num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
   num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
   recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
   recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
   send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
   send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
   send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);

   recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
   recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST);
   send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
   send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST);
   send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);

   for (i=0; i < num_recvs_B; i++)
      recv_procs_C[i] = recv_procs_B[i];
   for (i=0; i < num_recvs_B+1; i++)
      recv_vec_starts_C[i] = recv_vec_starts_B[i];
   for (i=0; i < num_sends_B; i++)
      send_procs_C[i] = send_procs_B[i];
   for (i=0; i < num_sends_B+1; i++)
      send_map_starts_C[i] = send_map_starts_B[i];
   for (i=0; i < send_map_starts_B[num_sends_B]; i++)
      send_map_elmts_C[i] = send_map_elmts_B[i];

   comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
   hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
   hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;

   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   if (num_cols_offd_A)
      hypre_TFree(map_to_B, HYPRE_MEMORY_HOST);

   *C_ptr = C;

   return (hypre_error_flag);
}

/*--------------------------------------------------------------------------
 * hypre_ParTMatmul:
 *
 * Multiplies two ParCSRMatrices transpose(A) and B and returns
 * the product in ParCSRMatrix C
 *
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/

hypre_ParCSRMatrix* hypre_ParTMatmul( hypre_ParCSRMatrix *A,
                                      hypre_ParCSRMatrix *B)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *AT_diag = NULL;
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *AT_offd = NULL;
   HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);

   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
   HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   hypre_ParCSRMatrix *C;
   HYPRE_BigInt *col_map_offd_C = NULL;
   HYPRE_Int *map_B_to_C;

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_tmp_diag = NULL;
   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_BigInt first_col_diag_C;
   HYPRE_BigInt last_col_diag_C;

   hypre_CSRMatrix *C_offd = NULL;
   hypre_CSRMatrix *C_tmp_offd = NULL;
   /* C_int: locally computed rows owned by other procs; C_ext: rows of the
      product received from other procs */
   hypre_CSRMatrix *C_int = NULL;
   hypre_CSRMatrix *C_ext = NULL;
   HYPRE_Int *C_ext_i;
   HYPRE_BigInt *C_ext_j;
   HYPRE_Complex *C_ext_data;
   HYPRE_Int *C_ext_diag_i;
   HYPRE_Int *C_ext_diag_j;
   HYPRE_Complex *C_ext_diag_data;
   HYPRE_Int *C_ext_offd_i;
   HYPRE_Int *C_ext_offd_j;
   HYPRE_Complex *C_ext_offd_data;
   HYPRE_Int C_ext_size = 0;
   HYPRE_Int C_ext_diag_size = 0;
   HYPRE_Int C_ext_offd_size = 0;

   HYPRE_Int *C_tmp_diag_i;
   HYPRE_Int *C_tmp_diag_j;
   HYPRE_Complex *C_tmp_diag_data;
   HYPRE_Int *C_tmp_offd_i;
   HYPRE_Int *C_tmp_offd_j;
   HYPRE_Complex *C_tmp_offd_data;

   HYPRE_Complex *C_offd_data=NULL;
   HYPRE_Int *C_offd_i=NULL;
   HYPRE_Int *C_offd_j=NULL;

   HYPRE_BigInt *temp;
   HYPRE_Int *send_map_starts_A;
   HYPRE_Int *send_map_elmts_A;
   HYPRE_Int num_sends_A;
   HYPRE_Int num_cols_offd_C = 0;
   HYPRE_Int *P_marker;

   HYPRE_Int i, j;
   HYPRE_Int i1, j_indx;

   HYPRE_BigInt nrows_A, ncols_A;
   HYPRE_BigInt nrows_B, ncols_B;
   /*HYPRE_Int allsquare = 0;*/
   HYPRE_Int cnt, cnt_offd, cnt_diag;
   HYPRE_BigInt value;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int max_num_threads;
   HYPRE_Int *C_diag_array = NULL;
   HYPRE_Int *C_offd_array = NULL;

   HYPRE_BigInt first_row_index, first_col_diag;
   HYPRE_Int local_num_rows, local_num_cols;

   nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();

   /* A^T*B requires A and B to have the same (global and local) row count */
   if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
      return NULL;
   }

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/

   /* local products: C_tmp_diag = A_diag^T * B_diag, etc. */
   hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
   hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);

   C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
   C_ext_size = 0;

   if (num_procs > 1)
   {
      hypre_CSRMatrix *C_int_diag;
      hypre_CSRMatrix *C_int_offd;
      void *request;

      C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
      C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
      C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
      /* temporarily hang C_int_{diag,offd} on B so MergeDiagAndOffd can
         produce one CSR with global column indices, then restore B */
      hypre_ParCSRMatrixDiag(B) = C_int_diag;
      hypre_ParCSRMatrixOffd(B) = C_int_offd;
      C_int = hypre_MergeDiagAndOffd(B);
      hypre_ParCSRMatrixDiag(B) = B_diag;
      hypre_ParCSRMatrixOffd(B) = B_offd;
      /* ship the rows owned by other processes and receive ours */
      hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
      C_ext = hypre_ExchangeExternalRowsWait(request);
      C_ext_i = hypre_CSRMatrixI(C_ext);
      C_ext_j = hypre_CSRMatrixBigJ(C_ext);
      C_ext_data = hypre_CSRMatrixData(C_ext);
      C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];

      hypre_CSRMatrixDestroy(C_int);
      hypre_CSRMatrixDestroy(C_int_diag);
      hypre_CSRMatrixDestroy(C_int_offd);
   }
   else
   {
      C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
      hypre_CSRMatrixInitialize(C_tmp_offd);
      hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0;
   }

   hypre_CSRMatrixDestroy(AT_diag);
   hypre_CSRMatrixDestroy(AT_offd);

   /*-----------------------------------------------------------------------
    *  Add contents of C_ext to C_tmp_diag and C_tmp_offd
    *  to obtain C_diag and C_offd
    *-----------------------------------------------------------------------*/

   /* check for new nonzero columns in C_offd generated through C_ext */
   first_col_diag_C = first_col_diag_B;
   last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;

   C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
   if (C_ext_size || num_cols_offd_B)
   {
      HYPRE_Int C_ext_num_rows;

      num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
      send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
      send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
      C_ext_num_rows = send_map_starts_A[num_sends_A];

      C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
      C_ext_diag_size = 0;
      C_ext_offd_size = 0;

      /* split C_ext entries into diag range vs offd, collecting offd
         global column indices into temp */
      for (i = 0; i < C_ext_num_rows; i++)
      {
         for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++)
         {
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
            {
               temp[C_ext_offd_size++] = C_ext_j[j];
            }
            else
            {
               C_ext_diag_size++;
            }
         }
         C_ext_diag_i[i+1] = C_ext_diag_size;
         C_ext_offd_i[i+1] = C_ext_offd_size;
      }

      cnt = C_ext_offd_size;
      for (i = 0; i < num_cols_offd_B; i++)
      {
         temp[cnt++] = col_map_offd_B[i];
      }

      /* sort and unique the collected offd columns -> col_map_offd_C */
      if (cnt)
      {
         hypre_BigQsort0(temp,0,cnt-1);
         value = temp[0];
         num_cols_offd_C = 1;
         for (i = 1; i < cnt; i++)
         {
            if (temp[i] > value)
            {
               value = temp[i];
               temp[num_cols_offd_C++] = value;
            }
         }
      }

      if (num_cols_offd_C)
      {
         col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
      }
      for (i = 0; i < num_cols_offd_C; i++)
      {
         col_map_offd_C[i] = temp[i];
      }

      hypre_TFree(temp, HYPRE_MEMORY_HOST);

      if (C_ext_diag_size)
      {
         C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST);
         C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST);
      }
      if (C_ext_offd_size)
      {
         C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST);
         C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST);
      }

      C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
      C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);

      C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
      C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
      C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);

      /* second pass: translate C_ext global columns to local diag/offd
         column indices */
      cnt_offd = 0;
      cnt_diag = 0;
      for (i = 0; i < C_ext_num_rows; i++)
      {
         for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++)
         {
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
            {
               C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C);
               C_ext_offd_data[cnt_offd++] = C_ext_data[j];
            }
            else
            {
               C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C);
               C_ext_diag_data[cnt_diag++] = C_ext_data[j];
            }
         }
      }
   }

   if (C_ext)
   {
      hypre_CSRMatrixDestroy(C_ext);
      C_ext = NULL;
   }

   /* renumber C_tmp_offd's columns from B's offd space into C's offd space */
   if (num_cols_offd_B)
   {
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i = 0; i < num_cols_offd_C; i++)
      {
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
      }
      for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
      {
         j_indx = C_tmp_offd_j[i];
         C_tmp_offd_j[i] = map_B_to_C[j_indx];
      }
   }

   /*-----------------------------------------------------------------------
    * Need to compute:
    *   C_diag = C_tmp_diag + C_ext_diag
    *   C_offd = C_tmp_offd + C_ext_offd
    *
    * First generate structure
    *-----------------------------------------------------------------------*/

   if (C_ext_size || num_cols_offd_B)
   {
      C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);
      C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);

      /* per-thread nonzero counts, later prefix-summed by thread 0 */
      C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
      C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int *B_marker = NULL;
         HYPRE_Int *B_marker_offd = NULL;
         HYPRE_Int ik, jk, j1, j2, jcol;
         HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
         HYPRE_Int rest, size;
         HYPRE_Int num_threads = hypre_NumActiveThreads();

         /* [ns, ne): rows of C handled by this thread */
         size = num_cols_diag_A/num_threads;
         rest = num_cols_diag_A - size*num_threads;
         ii = hypre_GetThreadNum();
         if (ii < rest)
         {
            ns = ii*size+ii;
            ne = (ii+1)*size+ii+1;
         }
         else
         {
            ns = ii*size+rest;
            ne = (ii+1)*size+rest;
         }

         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST);
         B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);

         for (ik = 0; ik < num_cols_diag_B; ik++)
         {
            B_marker[ik] = -1;
         }

         for (ik = 0; ik < num_cols_offd_C; ik++)
         {
            B_marker_offd[ik] = -1;
         }

         /* symbolic pass: count nonzeros per thread */
         nnz_d = 0;
         nnz_o = 0;
         for (ik = ns; ik < ne; ik++)
         {
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               B_marker[jcol] = ik;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               B_marker_offd[jcol] = ik;
               nnz_o++;
            }

            /* add columns contributed by C_ext for this row, skipping those
               already present in the local product */
            for (jk = 0; jk < num_sends_A; jk++)
            {
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
               {
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < ik)
                        {
                           B_marker[jcol] = ik;
                           nnz_d++;
                        }
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < ik)
                        {
                           B_marker_offd[jcol] = ik;
                           nnz_o++;
                        }
                     }
                     break;
                  }
               }
            }
            C_diag_array[ii] = nnz_d;
            C_offd_array[ii] = nnz_o;
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* thread 0: prefix-sum the counts and allocate C_diag/C_offd */
         if (ii == 0)
         {
            nnz_d = 0;
            nnz_o = 0;
            for (ik = 0; ik < num_threads-1; ik++)
            {
               C_diag_array[ik+1] += C_diag_array[ik];
               C_offd_array[ik+1] += C_offd_array[ik];
            }
            nnz_d = C_diag_array[num_threads-1];
            nnz_o = C_offd_array[num_threads-1];
            C_diag_i[num_cols_diag_A] = nnz_d;
            C_offd_i[num_cols_diag_A] = nnz_o;

            C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
            C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
            hypre_CSRMatrixI(C_diag) = C_diag_i;
            hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C);
            C_diag_j = hypre_CSRMatrixJ(C_diag);
            C_diag_data = hypre_CSRMatrixData(C_diag);

            hypre_CSRMatrixI(C_offd) = C_offd_i;
            hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C);
            C_offd_j = hypre_CSRMatrixJ(C_offd);
            C_offd_data = hypre_CSRMatrixData(C_offd);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /*-----------------------------------------------------------------------
          * Need to compute C_diag = C_tmp_diag + C_ext_diag
          * and C_offd = C_tmp_offd + C_ext_offd   !!!!
          * Now fill in values
          *-----------------------------------------------------------------------*/

         for (ik = 0; ik < num_cols_diag_B; ik++)
         {
            B_marker[ik] = -1;
         }

         for (ik = 0; ik < num_cols_offd_C; ik++)
         {
            B_marker_offd[ik] = -1;
         }

         /*-----------------------------------------------------------------------
          * Populate matrices
          *-----------------------------------------------------------------------*/

         /* numeric pass: start at this thread's offset from the prefix sums */
         nnz_d = 0;
         nnz_o = 0;
         if (ii)
         {
            nnz_d = C_diag_array[ii-1];
            nnz_o = C_offd_array[ii-1];
         }
         for (ik = ns; ik < ne; ik++)
         {
            C_diag_i[ik] = nnz_d;
            C_offd_i[ik] = nnz_o;
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               C_diag_j[nnz_d] = jcol;
               C_diag_data[nnz_d] = C_tmp_diag_data[jk];
               B_marker[jcol] = nnz_d;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               C_offd_j[nnz_o] = jcol;
               C_offd_data[nnz_o] = C_tmp_offd_data[jk];
               B_marker_offd[jcol] = nnz_o;
               nnz_o++;
            }

            /* merge in C_ext contributions, accumulating into existing
               positions recorded in the markers */
            for (jk = 0; jk < num_sends_A; jk++)
            {
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
               {
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < C_diag_i[ik])
                        {
                           C_diag_j[nnz_d] = jcol;
                           C_diag_data[nnz_d] = C_ext_diag_data[j2];
                           B_marker[jcol] = nnz_d;
                           nnz_d++;
                        }
                        else
                        {
                           C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
                        }
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < C_offd_i[ik])
                        {
                           C_offd_j[nnz_o] = jcol;
                           C_offd_data[nnz_o] = C_ext_offd_data[j2];
                           B_marker_offd[jcol] = nnz_o;
                           nnz_o++;
                        }
                        else
                        {
                           C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
                        }
                     }
                     break;
                  }
               }
            }
         }
         hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
         hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST);
      } /*end parallel region */
      hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST);
      hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST);
   }

   /*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A,
                                col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */

   /* row_starts[0] is start of local rows.
      row_starts[1] is start of next processor's rows */
   first_row_index = col_starts_A[0];
   local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index );
   first_col_diag = col_starts_B[0];
   local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag);

   /* assemble the ParCSR wrapper by hand (rows from A's cols, cols from B's) */
   C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(C) = comm;
   hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A;
   hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B;
   hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1;
   hypre_ParCSRMatrixColMapOffd(C) = NULL;
   hypre_ParCSRMatrixAssumedPartition(C) = NULL;
   hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
   hypre_ParCSRMatrixColStarts(C) = col_starts_B;
   hypre_ParCSRMatrixCommPkg(C) = NULL;
   hypre_ParCSRMatrixCommPkgT(C) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(C) = 1;
   hypre_ParCSRMatrixRowindices(C) = NULL;
   hypre_ParCSRMatrixRowvalues(C) = NULL;
   hypre_ParCSRMatrixGetrowactive(C) = 0;

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C,0);
   hypre_ParCSRMatrixSetColStartsOwner(C,0);

   /* when nothing came from other procs, the local products ARE the result */
   if (C_diag)
   {
      hypre_CSRMatrixSetRownnz(C_diag);
      hypre_ParCSRMatrixDiag(C) = C_diag;
   }
   else
   {
      hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
   }

   if (C_offd)
   {
      hypre_CSRMatrixSetRownnz(C_offd);
      hypre_ParCSRMatrixOffd(C) = C_offd;
   }
   else
   {
      hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
   }

   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C;

   /* compress col_map_offd_C: drop offd columns that ended up unused */
   if (num_cols_offd_C)
   {
      HYPRE_Int jj_count_offd, nnz_offd;
      HYPRE_BigInt *new_col_map_offd_C = NULL;

      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_offd_C; i++)
      {
         P_marker[i] = -1;
      }

      /* first visit flips the marker from -1 (nonzero) to 0 and counts the
         distinct columns actually referenced */
      jj_count_offd = 0;
      nnz_offd = C_offd_i[num_cols_diag_A];
      for (i = 0; i < nnz_offd; i++)
      {
         i1 = C_offd_j[i];
         if (P_marker[i1])
         {
            P_marker[i1] = 0;
            jj_count_offd++;
         }
      }

      if (jj_count_offd < num_cols_offd_C)
      {
         new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
         jj_count_offd = 0;
         for (i = 0; i < num_cols_offd_C; i++)
         {
            if (!P_marker[i])
            {
               P_marker[i] = jj_count_offd;
               new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
            }
         }

         for (i = 0; i < nnz_offd; i++)
         {
            i1 = C_offd_j[i];
            C_offd_j[i] = P_marker[i1];
         }

         num_cols_offd_C = jj_count_offd;
         hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST);
         col_map_offd_C = new_col_map_offd_C;
         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   /*-----------------------------------------------------------------------
    *  Free various arrays
    *-----------------------------------------------------------------------*/
   if (C_ext_size || num_cols_offd_B)
   {
      hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST);
   }
   if (C_ext_diag_size)
   {
      hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   if (C_ext_offd_size)
   {
      hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }

   if (C_diag)
   {
      hypre_CSRMatrixDestroy(C_tmp_diag);
   }
   if (C_offd)
   {
      hypre_CSRMatrixDestroy(C_tmp_offd);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
   {
      hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
      hypre_SyncCudaComputeStream(hypre_handle());
   }
#endif

   return C;
}

/**
 * @brief Apply the stored block-diagonal inverse of A to a vector:
 *        creates bs = B^{-1} * b, where B is the block diagonal of A.
 *
 * Relies on the dense inverted diagonal blocks (A->bdiaginv) and the
 * communication package (A->bdiaginv_comm_pkg) previously computed and
 * saved in A by hypre_ParcsrBdiagInvScal; that routine must have been
 * called first with the same blockSize (checked by the assert below).
 *
 * @param[in]  b         input vector
 * @param[in]  blockSize size of the diagonal blocks
 * @param[out] bs        newly created output vector B^{-1}*b
 * @param[in]  A         matrix carrying bdiaginv and bdiaginv_comm_pkg
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector     *b,
                          HYPRE_Int            blockSize,
                          hypre_ParVector    **bs,
                          hypre_ParCSRMatrix  *A)
{
   /* NOTE(review): hypre_ParCSRMatrixComm is applied to a hypre_ParVector;
    * presumably both structs expose a 'comm' member so the macro expands
    * correctly -- confirm against the struct definitions */
   MPI_Comm comm = hypre_ParCSRMatrixComm(b);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   /* NOTE(review): block_start/block_end are HYPRE_Int but iterate over a
    * HYPRE_BigInt range below -- potential narrowing for very large global
    * sizes; confirm intended */
   HYPRE_Int i, j, s, block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row   = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row    = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row     = last_row + 1; /* one past-the-last */
   /* first/last block boundaries (in global row indices) that overlap the
    * rows owned by this processor */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );

   /* the saved factors must match the requested block size */
   hypre_assert(blockSize == A->bdiag_size);
   HYPRE_Complex *bdiaginv = A->bdiaginv;
   hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
   /* walking pointer over the dense blocks, advanced per block below */
   HYPRE_Complex *dense = bdiaginv;

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);

   /* local vector of b */
   hypre_Vector  *b_local      = hypre_ParVectorLocalVector(b);
   HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle *comm_handle;

   /* partitioning has 2 entries: [first local row, one-past-last) */
   j = 2;
   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(part, hypre_ParVectorPartitioning(b), j*sizeof(HYPRE_BigInt));
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector  *bnew_local      = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b: off-processor entries of b needed by the blocks
    * spanning this processor's row range */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
   /* ... */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* for each diagonal block overlapping this proc's rows, apply the dense
    * inverse block to the (local + received) entries of b */
   for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start); /* actual block size (last block may be short) */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* only compute rows owned by this processor */
         if (big_i < first_row || big_i >= end_row) { continue; }
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         bnew_local_data[local_i] = 0.0;
         /* dot the block_i-th row of the inverse block with b */
         for (j = 0; j < s; j++)
         {
            HYPRE_BigInt  global_rid = block_start + (HYPRE_BigInt)j;
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0) { continue; }
            if (global_rid >= first_row && global_rid < end_row)
            {
               /* entry of b owned locally */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               bnew_local_data[local_i] += val * b_local_data[rid];
            }
            else
            {
               /* entry of b received from another proc; recv_b is ordered as
                * [rows before first_row, rows after end_row) */
               HYPRE_Int rid;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               bnew_local_data[local_i] += val * recv_b[rid];
            }
         }
      }
      dense += blockSize * blockSize; /* advance to next stored block */
   }

   hypre_TFree(send_b, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_b, HYPRE_MEMORY_HOST);

   *bs = bnew;

   return hypre_error_flag;
}

/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A.
 *
 * Also saves the inverted diagonal blocks and the communication package
 * in A (A->bdiaginv, A->bdiag_size, A->bdiaginv_comm_pkg) for later use
 * by hypre_ParvecBdiagInvScal.
 *
 * @param[in]  A         input matrix (must be globally square)
 * @param[in]  blockSize block size
 * @param[out] As        newly created scaled matrix
 * @return hypre_error_flag
 * @warning only supports N_ROW == N_COL
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A,
                          HYPRE_Int blockSize,
                          hypre_ParCSRMatrix
**As)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, k, s;
   HYPRE_BigInt block_start, block_end;

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);

   /* local row/column ranges owned by this processor */
   HYPRE_Int     nrow_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt  first_row  = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt  last_row   = hypre_ParCSRMatrixLastRowIndex(A);
   HYPRE_BigInt  end_row    = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */
   HYPRE_Int     ncol_local = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_BigInt  first_col  = hypre_ParCSRMatrixFirstColDiag(A);
   /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
   HYPRE_BigInt  end_col    = first_col + (HYPRE_BigInt)ncol_local;

   HYPRE_BigInt  nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt  ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt *row_starts  = hypre_ParCSRMatrixRowStarts(A);
   void         *request;

   /* if square globally and locally */
   HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) &&
                       (first_row == first_col);

   if (nrow_global != ncol_global)
   {
      hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
      return hypre_error_flag;
   }

   /* in block diagonals, row range of the blocks this proc spans */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
   HYPRE_Int    num_blocks      = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize);

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);
   //return 0;

   /* number of external rows: rows of the spanned blocks not owned locally */
   HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
   HYPRE_BigInt *ext_indices;
   HYPRE_Int A_ext_nnz;

   hypre_CSRMatrix *A_ext   = NULL;
   HYPRE_Complex   *A_ext_a = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   /* storage for all dense (inverted) diagonal blocks, one after another;
    * 'dense' walks through it block by block */
   HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Real *dense = dense_all;

   /* LAPACK workspace for dgetrf/dgetri */
   HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Complex *dgetri_work = NULL;
   HYPRE_Int      dgetri_lwork = -1, lapack_info;

   HYPRE_Int  num_cols_A_offd_new;
   HYPRE_BigInt *col_map_offd_A_new;
   HYPRE_BigInt big_i;
   HYPRE_Int *offd2new = NULL;
   HYPRE_Int *marker_diag, *marker_newoffd;

   HYPRE_Int nnz_diag = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd = A_offd_i[nrow_local];
   HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
   HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
   HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
   /* heuristic initial capacity; arrays grow geometrically as needed */
   HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
   HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;

   A_diag_i_new = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1, HYPRE_MEMORY_HOST);
   A_diag_j_new = hypre_CTAlloc(HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_offd_i_new = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_CTAlloc(HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrix *Anew;
   hypre_CSRMatrix    *Anew_diag;
   hypre_CSRMatrix    *Anew_offd;
   HYPRE_BigInt *row_starts_new, *col_starts_new;
   /* drop tolerance (~double machine epsilon) used to filter near-zeros
    * out of the inverted blocks */
   HYPRE_Real eps = 2.2e-16;

   /* Start with extracting the external rows */
   HYPRE_BigInt *ext_offd;
   /* global indices of the rows needed from other procs: the rows of the
    * spanned blocks that lie before first_row or after end_row */
   ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
   j = 0;
   for (big_i = first_row_block; big_i < first_row; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   for (big_i = end_row; big_i < end_row_block; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   hypre_assert(j == num_ext_rows);

   /* create CommPkg for external rows; saved in A for reuse by
    * hypre_ParvecBdiagInvScal */
   hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts,
                                 hypre_ParCSRMatrixAssumedPartition(A),
                                 num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);

   hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
   A_ext = hypre_ParcsrGetExternalRowsWait(request);

   hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);

   A_ext_i   = hypre_CSRMatrixI(A_ext);
   A_ext_j   = hypre_CSRMatrixBigJ(A_ext);
   A_ext_a   = hypre_CSRMatrixData(A_ext);
   A_ext_nnz = A_ext_i[num_ext_rows];
   ext_offd  = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);

   /* find the offd indices in A_ext */
   for (i = 0, j = 0; i < A_ext_nnz; i++)
   {
      /* global index */
      HYPRE_BigInt cid = A_ext_j[i];
      /* keep the offd indices */
      if (cid < first_col || cid >= end_col)
      {
         ext_offd[j++] = cid;
      }
   }
   /* remove duplicates after sorting (TODO better ways?) */
   hypre_BigQsort0(ext_offd, 0, j-1);
   for (i = 0, k = 0; i < j; i++)
   {
      if (i == 0 || ext_offd[i] != ext_offd[i-1])
      {
         ext_offd[k++] = ext_offd[i];
      }
   }

   /* union these `k' new indices into col_map_offd_A */
   col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
   if (k)
   {
      /* map offd to offd_new */
      offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd,
                &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
   hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);

   /*
    *   adjust column indices in A_ext
    */
   for (i = 0; i < A_ext_nnz; i++)
   {
      HYPRE_BigInt cid = A_ext_j[i];
      if (cid < first_col || cid >= end_col)
      {
         j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
         /* searching must succeed */
         hypre_assert(j >= 0 && j < num_cols_A_offd_new);
         /* trick: save ncol_local + j back, so diag and offd columns share
          * one index space [0, ncol_local + num_cols_A_offd_new) */
         A_ext_j[i] = ncol_local + j;
      }
      else
      {
         /* save local index: [0, ncol_local-1] */
         A_ext_j[i] = cid - first_col;
      }
   }

   /* marker for diag: last row in which each diag column was touched */
   marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   /* marker for newoffd */
   marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }

   /* outermost loop for blocks */
   for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start); /* actual block size (last may be short) */

      /* 1.
fill the dense block diag matrix */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         /* row index i: it can be local or external */
         if (big_i >= first_row && big_i < end_row)
         {
            /* is a local row: copy entries falling inside the block from
             * the diag and offd parts of A */
            j = (HYPRE_Int)(big_i - first_row);
            for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
            {
               HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k];
               }
            }
            if (num_cols_A_offd)
            {
               for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
               {
                  HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
                  if (cid >= block_start && cid < block_end)
                  {
                     dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k];
                  }
               }
            }
         }
         else
         {
            /* is an external row: locate it in A_ext
             * (A_ext rows are ordered: rows before first_row, then rows
             * after end_row) */
            if (big_i < first_row)
            {
               j = (HYPRE_Int)(big_i - first_row_block);
            }
            else
            {
               j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
            }
            for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++)
            {
               HYPRE_BigInt cid = A_ext_j[k];
               /* recover the global index (undo the diag/offd index trick) */
               cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local];
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k];
               }
            }
         }
      }

      /* 2.
invert the dense matrix: LU factorization then explicit inverse */
      hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);

      hypre_assert(lapack_info == 0);

      if (lapack_info == 0)
      {
         HYPRE_Int  query = -1;
         HYPRE_Real lwork_opt;
         /* query the optimal size of work */
         hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);

         hypre_assert(lapack_info == 0);

         if (lwork_opt > dgetri_lwork)
         {
            /* NOTE(review): assigns HYPRE_Real to HYPRE_Int (truncation);
             * standard LAPACK workspace-query idiom, but confirm types */
            dgetri_lwork = lwork_opt;
            dgetri_work  = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
         }

         hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);

         hypre_assert(lapack_info == 0);
      }

      /* filter out *zeros*: drop entries tiny relative to the block's
       * Frobenius norm */
      HYPRE_Real Fnorm = 0.0;
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            HYPRE_Complex t = dense[j+i*blockSize];
            Fnorm += t * t;
         }
      }

      Fnorm = sqrt(Fnorm);

      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm )
            {
               dense[j+i*blockSize] = 0.0;
            }
         }
      }

      /* 3. premultiplication: one-pass dynamic allocation */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* starting points of this row in j */
         HYPRE_Int diag_i_start = nnz_diag_new;
         HYPRE_Int offd_i_start = nnz_offd_new;

         /* compute a new row with global index 'big_i' and local index 'local_i' */
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);

         /* only compute rows owned locally */
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }

         /* if square^2: reserve the first space in diag part for the diag entry */
         if (square2)
         {
            marker_diag[local_i] = nnz_diag_new;
            if (nnz_diag_new == nnz_diag_alloc)
            {
               nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
               A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
               A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
            }
            A_diag_j_new[nnz_diag_new] = local_i;
            A_diag_a_new[nnz_diag_new] = 0.0;
            nnz_diag_new ++;
         }

         /* combine s rows: new row big_i = sum_j dense(block_i,j) * row(block_start+j) */
         for (j = 0; j < s; j++)
         {
            /* row to combine: global row id */
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* the multiplier */
            HYPRE_Complex val = dense[block_i + j*blockSize];

            if (val == 0.0)
            {
               continue;
            }

            if (global_rid >= first_row && global_rid < end_row)
            {
               /* this row is local: accumulate its diag and offd entries
                * into the new row, using markers to merge duplicates */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               HYPRE_Int ii;

               for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++)
               {
                  HYPRE_Int     col = A_diag_j[ii];
                  HYPRE_Complex vv  = A_diag_a[ii];

                  if (marker_diag[col] < diag_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_diag[col] = nnz_diag_new;
                     if (nnz_diag_new == nnz_diag_alloc)
                     {
                        nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                        A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_diag_j_new[nnz_diag_new] = col;
                     A_diag_a_new[nnz_diag_new] = val * vv;
                     nnz_diag_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_diag[col];
                     hypre_assert(A_diag_j_new[p] == col);
                     A_diag_a_new[p] += val * vv;
                  }
               }

               for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_offd_j[ii];
                  /* use the mapper to map to new offd */
                  HYPRE_Int col_new = offd2new ? offd2new[col] : col;
                  HYPRE_Complex vv = A_offd_a[ii];

                  if (marker_newoffd[col_new] < offd_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_newoffd[col_new] = nnz_offd_new;
                     if (nnz_offd_new == nnz_offd_alloc)
                     {
                        nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                        A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_offd_j_new[nnz_offd_new] = col_new;
                     A_offd_a_new[nnz_offd_new] = val * vv;
                     nnz_offd_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_newoffd[col_new];
                     hypre_assert(A_offd_j_new[p] == col_new);
                     A_offd_a_new[p] += val * vv;
                  }
               }
            }
            else
            {
               /* this is an external row: go to A_ext */
               HYPRE_Int rid, ii;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }

               for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++)
               {
                  /* A_ext_j already holds the combined diag/offd local index */
                  HYPRE_Int     col = (HYPRE_Int)A_ext_j[ii];
                  HYPRE_Complex vv  = A_ext_a[ii];

                  if (col < ncol_local)
                  {
                     /* in diag part */
                     if (marker_diag[col] < diag_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_diag[col] = nnz_diag_new;
                        if (nnz_diag_new == nnz_diag_alloc)
                        {
                           nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                           A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
                           A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_diag_j_new[nnz_diag_new] = col;
                        A_diag_a_new[nnz_diag_new] = val * vv;
                        nnz_diag_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_diag[col];
                        hypre_assert(A_diag_j_new[p] == col);
                        A_diag_a_new[p] += val * vv;
                     }
                  }
                  else
                  {
                     /* in offd part */
                     col -= ncol_local;
                     if (marker_newoffd[col] < offd_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_newoffd[col] = nnz_offd_new;
                        if (nnz_offd_new == nnz_offd_alloc)
                        {
                           nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                           A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
                           A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_offd_j_new[nnz_offd_new] = col;
                        A_offd_a_new[nnz_offd_new] = val * vv;
                        nnz_offd_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_newoffd[col];
                        hypre_assert(A_offd_j_new[p] == col);
                        A_offd_a_new[p] += val * vv;
                     }
                  }
               }
            }
         } /* done for row local_i */

         A_diag_i_new[local_i + 1] = nnz_diag_new;
         A_offd_i_new[local_i + 1] = nnz_offd_new;
      } /* for i, each row */

      dense += blockSize * blockSize;
   } /* for each block */

   /* done with all rows */
   /* resize properly */
   A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_new, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_new, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);

   /* readjust col_map_offd_new: compress out offd columns that ended up
    * unused, remapping A_offd_j_new accordingly */
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = A_offd_j_new[i];
      if (marker_newoffd[j] == -1)
      {
         marker_newoffd[j] = 1;
      }
   }
   for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
   {
      if (marker_newoffd[i] == 1)
      {
         col_map_offd_A_new[j] = col_map_offd_A_new[i];
         marker_newoffd[i] = j++;
      }
   }
   num_cols_A_offd_new = j;

   for (i = 0; i < nnz_offd_new; i++)
   {
      j = marker_newoffd[A_offd_j_new[i]];
      hypre_assert(j >= 0 && j < num_cols_A_offd_new);
      A_offd_j_new[i] = j;
   }

   /* partitionings have 2 entries each; copied from A (Anew shares A's
    * row/col distribution) */
   j = 2;
   row_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
   memcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));

   /* Now, we should have everything of Parcsr matrix As */
   Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global,
row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = 
hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows  = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_BigInt *row_starts      = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_BigInt  first_row       = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_BigInt     first_col      = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt    *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int        num_procs;
   HYPRE_Int        my_id;
   void           **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);

   /* fill the send array with row lengths (diag + offd nnz per row) */
   for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
   {
      /* j: row index to send */
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
      num_nnz_send += send_i[i];
   }

   /* send this array out: note the shift in recv_i by one (async) */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* prepare data to send out. overlap with the above communication */
   send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
   }

   send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
   send_i_offset[0] = 0;
   hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* prefix sum. TODO: OMP parallelization */
   for (i = 1; i <= num_rows_send; i++)
   {
      send_i_offset[i] += send_i_offset[i-1];
   }
   hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i <= num_sends; i++)
   {
      send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* fill the CSR matrix: j and a (column indices translated to global) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
   for (i = 0; i < num_rows_send; i++)
   {
      HYPRE_Int i1 = send_i_offset[i];
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      /* open row j and fill ja and a to send */
      for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
      {
         send_j[i1] = first_col + A_diag_j[k];
         if (want_data)
         {
            send_a[i1] = A_diag_a[k];
         }
         i1++;
      }
      if (num_procs > 1)
      {
         for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
         {
            send_j[i1] = col_map_offd_A[A_offd_j[k]];
            if (want_data)
            {
               send_a[i1] = A_offd_a[k];
            }
            i1++;
         }
      }
      hypre_assert(send_i_offset[i+1] == i1);
   }

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs (prefix sum of received row lengths) */
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
   }
   recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data
    * (reuses the proc lists of comm_pkg, with per-nnz start offsets) */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, send_a, recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   /* create A_ext: rows indexed locally, columns kept as global (BigJ) */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI   (A_ext) = recv_i;
   hypre_CSRMatrixBigJ(A_ext) = recv_j;
   hypre_CSRMatrixData(A_ext) = recv_a;

   /* output: [comm_handle_j, comm_handle_a, A_ext, comm_pkg_j], consumed
    * and freed by hypre_ParcsrGetExternalRowsWait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/**
 * @brief Complete the exchange started by hypre_ParcsrGetExternalRowsInit.
 *
 * Destroys the communication handles and the temporary comm package held
 * in the request, frees the send buffers, and returns the received rows.
 *
 * @param[in] vrequest opaque handle produced by the Init call
 * @return CSR matrix holding the fetched external rows (caller owns it)
 */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];
   HYPRE_BigInt           *send_j        = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j);

   /* comm_handle_a is NULL when Init was called with want_data == 0 */
   if (comm_handle_a)
   {
      HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a);
      hypre_ParCSRCommHandleDestroy(comm_handle_a);
      hypre_TFree(send_a, HYPRE_MEMORY_HOST);
   }

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_TFree(send_j, HYPRE_MEMORY_HOST);

   /* comm_pkg_j borrowed the proc lists from the caller's comm package, so
    * only the start arrays allocated in Init are freed here */
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

/*--------------------------------------------------------------------------
 * hypre_ParcsrAdd: performs C = alpha*A + beta*B
 *
 * A and B are assumed to have the same row and column partitionings
 *
 * TODO: Add OpenMP support
 *       Use hypre_CSRMatrixAdd for diag and offd matrices
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParcsrAdd( HYPRE_Complex        alpha,
                 hypre_ParCSRMatrix  *A,
                 HYPRE_Complex        beta,
                 hypre_ParCSRMatrix  *B,
                 hypre_ParCSRMatrix **Cout )
{
   /* ParCSRMatrix data */
   MPI_Comm      comm       = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt  num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt  num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt  num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   HYPRE_BigInt  num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int       *rownnz_diag_A     = hypre_CSRMatrixRownnz(A_diag);
   HYPRE_Int        num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
   HYPRE_Int        num_rows_diag_A   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_diag_A   = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int        nnz_diag_A
= hypre_CSRMatrixNumNonzeros(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int nnz_offd_A = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd; /* diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_a = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag); HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int nnz_diag_B = hypre_CSRMatrixNumNonzeros(B_diag); /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Complex *B_offd_a = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd); HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd); HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int nnz_offd_B = hypre_CSRMatrixNumNonzeros(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd; /* C */ hypre_ParCSRMatrix *C; HYPRE_BigInt *row_starts_C; HYPRE_BigInt *col_starts_C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B; 
HYPRE_BigInt *col_map_offd_C; HYPRE_Int nnz_diag_C_alloc = nnz_diag_A + nnz_diag_B; HYPRE_Int nnz_offd_C_alloc = nnz_offd_A + nnz_offd_B; HYPRE_Int nnz_diag_C = 0; HYPRE_Int nnz_offd_C = 0; HYPRE_Int *C_diag_i, *C_offd_i; HYPRE_Int *C_diag_j, *C_offd_j; HYPRE_Complex *C_diag_a, *C_offd_a; HYPRE_Int *marker_diag, *marker_offd; HYPRE_Int num_rownnz_diag_C; HYPRE_Int num_rownnz_offd_C; HYPRE_Int *rownnz_diag_C = NULL; HYPRE_Int *rownnz_offd_C = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int i, ii, j; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; hypre_assert(num_rows_A == num_rows_B); hypre_assert(num_cols_A == num_cols_B); hypre_assert(num_rows_diag_A == num_rows_diag_B); hypre_assert(num_cols_diag_A == num_cols_diag_B); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); /* Allocate memory */ col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag_C_alloc, memory_location_C); C_diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag_C_alloc, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd_C_alloc, memory_location_C); C_offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd_C_alloc, memory_location_C); marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST); marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, 
HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_diag_A; i++) { marker_diag[i] = -1; } for (i = 0; i < num_cols_offd_C; i++) { marker_offd[i] = -1; } /* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/ A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_union2(num_cols_offd_A, col_map_offd_A, num_cols_offd_B, col_map_offd_B, &num_cols_offd_C, col_map_offd_C, A2C_offd, B2C_offd); /* Set nonzero rows data of diag_C */ num_rownnz_diag_C = num_rows_diag_A; if ((num_rownnz_diag_A < num_rows_diag_A) && (num_rownnz_diag_B < num_rows_diag_B)) { hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A, num_rownnz_diag_B, rownnz_diag_B, &num_rownnz_diag_C, &rownnz_diag_C); } /* Set diag_C */ for (i = 0; i < num_rownnz_diag_C; i++) { ii = rownnz_diag_C ? rownnz_diag_C[i] : i; HYPRE_Int diag_i_start = nnz_diag_C; for (j = A_diag_i[ii]; j < A_diag_i[ii+1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_C; C_diag_j[nnz_diag_C] = col; C_diag_a[nnz_diag_C] = alpha * val; nnz_diag_C ++; } else { /* this should not happen */ hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n", __FILE__, __func__, __LINE__); } } for (j = B_diag_i[ii]; j < B_diag_i[ii+1]; j++) { HYPRE_Int col = B_diag_j[j]; HYPRE_Complex val = B_diag_a[j]; if (marker_diag[col] < diag_i_start /*&& hypre_abs(val) > 0.0*/) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_C; C_diag_j[nnz_diag_C] = col; C_diag_a[nnz_diag_C] = beta * val; nnz_diag_C ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(C_diag_j[p] == col); C_diag_a[p] += beta * val; } } C_diag_i[ii+1] = nnz_diag_C; } C_diag_i[num_rows_diag_A] = nnz_diag_C; /* Correct C_diag_i */ if (rownnz_diag_C) { for (i = 0; i < num_rownnz_diag_C - 
1; i++) { for (ii = rownnz_diag_C[i] + 1; ii < rownnz_diag_C[i+1]; ii++) { C_diag_i[ii+1] = C_diag_i[rownnz_diag_C[i]+1]; } } if (num_rownnz_diag_C) { for (ii = rownnz_diag_C[num_rownnz_diag_C - 1] + 1; ii < num_rows_diag_A; ii++) { C_diag_i[ii+1] = C_diag_i[rownnz_diag_C[num_rownnz_diag_C - 1]+1]; } } } /* Set nonzero rows data of offd_C*/ num_rownnz_offd_C = num_rows_offd_A; if ((num_rownnz_offd_A < num_rows_offd_A) && (num_rownnz_offd_B < num_rows_offd_B)) { hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A, num_rownnz_offd_B, rownnz_offd_B, &num_rownnz_offd_C, &rownnz_offd_C); } /* Set offd_C */ for (i = 0; i < num_rownnz_offd_C; i++) { ii = rownnz_offd_C ? rownnz_offd_C[i] : i; HYPRE_Int offd_i_start = nnz_offd_C; for (j = A_offd_i[ii]; j < A_offd_i[ii+1]; j++) { HYPRE_Int colA = A_offd_j[j]; HYPRE_Int colC = A2C_offd[colA]; HYPRE_Complex val = A_offd_a[j]; if (marker_offd[colC] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_offd[colC] = nnz_offd_C; C_offd_j[nnz_offd_C] = colC; C_offd_a[nnz_offd_C] = alpha * val; nnz_offd_C++; } else { /* this should not happen */ hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n", __FILE__, __func__, __LINE__); } } for (j = B_offd_i[ii]; j < B_offd_i[ii+1]; j++) { HYPRE_Int colB = B_offd_j[j]; HYPRE_Int colC = B2C_offd[colB]; HYPRE_Complex val = B_offd_a[j]; if (marker_offd[colC] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_offd[colC] = nnz_offd_C; C_offd_j[nnz_offd_C] = colC; C_offd_a[nnz_offd_C] = beta * val; nnz_offd_C++; } else { /* existing entry, update */ HYPRE_Int p = marker_offd[colC]; hypre_assert(C_offd_j[p] == colC); C_offd_a[p] += beta * val; } } C_offd_i[ii+1] = nnz_offd_C; } C_offd_i[num_rows_diag_A] = nnz_offd_C; /* Correct C_offd_i */ if (rownnz_offd_C) { for (i = 0; i < num_rownnz_offd_C - 1; i++) { for (ii = rownnz_offd_C[i] + 1; ii < rownnz_offd_C[i+1]; ii++) { C_offd_i[ii+1] = C_offd_i[rownnz_offd_C[i]+1]; } } 
/* Tail of the host ParCSR matrix-add routine (entry point above this view):
 * propagate the last nonzero row's offd row pointer to any trailing
 * all-zero rows so C_offd_i is a valid CSR row-pointer array. */
if (num_rownnz_offd_C)
{
   for (ii = rownnz_offd_C[num_rownnz_offd_C - 1] + 1; ii < num_rows_diag_A; ii++)
   {
      C_offd_i[ii+1] = C_offd_i[rownnz_offd_C[num_rownnz_offd_C - 1]+1];
   }
}
}

/* C inherits the row/column partitionings of A (asserted earlier to match B's) */
row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
memcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), 2*sizeof(HYPRE_BigInt));
memcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), 2*sizeof(HYPRE_BigInt));

/* Now, we should have everything of Parcsr matrix C */
C = hypre_ParCSRMatrixCreate(comm,
                             num_rows_A,
                             num_cols_A,
                             row_starts_C,
                             col_starts_C,
                             num_cols_offd_C,
                             nnz_diag_C,
                             nnz_offd_C);

/* Hand the assembled arrays to C's diag block (C takes ownership) */
C_diag = hypre_ParCSRMatrixDiag(C);
hypre_CSRMatrixData(C_diag) = C_diag_a;
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixJ(C_diag) = C_diag_j;
hypre_CSRMatrixNumRownnz(C_diag) = num_rownnz_diag_C;
hypre_CSRMatrixRownnz(C_diag) = rownnz_diag_C;
hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;

/* Hand the assembled arrays to C's offd block (C takes ownership) */
C_offd = hypre_ParCSRMatrixOffd(C);
hypre_CSRMatrixData(C_offd) = C_offd_a;
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_CSRMatrixNumRownnz(C_offd) = num_rownnz_offd_C;
hypre_CSRMatrixRownnz(C_offd) = rownnz_offd_C;
hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

hypre_ParCSRMatrixSetNumNonzeros(C);
hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);

/* create CommPkg of C */
hypre_MatvecCommPkgCreate(C);

*Cout = C;

/* Free memory (work arrays only; the CSR arrays now belong to C) */
hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);

HYPRE_ANNOTATE_FUNC_END;

return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFnorm
 *
 * Global Frobenius norm: each rank sums the squared Frobenius norms of its
 * diag and offd blocks, the squares are summed across ranks with an
 * Allreduce, and the square root of the total is returned on every rank.
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
   MPI_Comm   comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Real f_diag, f_offd, local_result, result;

   f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
   f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
   local_result = f_diag * f_diag + f_offd * f_offd;

   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);

   return sqrt(result);
}

/*--------------------------------------------------------------------------
 * hypre_ExchangeExternalRowsInit
 *
 * Start the "reverse" exchange of CSR rows: the rows of B_ext (indexed by
 * this rank's recv pattern in comm_pkg_A) are sent back to the ranks that
 * own them, producing B_int (indexed by the send pattern).  Communication
 * is only initiated here; *request_ptr must be passed to
 * hypre_ExchangeExternalRowsWait to complete it.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix      *B_ext,
                                hypre_ParCSRCommPkg  *comm_pkg_A,
                                void                **request_ptr)
{
   MPI_Comm   comm             = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs        = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs       = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts  = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends        = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs       = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts  = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);

   HYPRE_Int  num_elmts_send   = send_map_starts[num_sends];
   HYPRE_Int  num_elmts_recv   = recv_vec_starts[num_recvs];

   /* B_ext may be NULL (no external rows on this rank) */
   HYPRE_Int     *B_ext_i      = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
   HYPRE_BigInt  *B_ext_j      = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
   HYPRE_Complex *B_ext_data   = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
   HYPRE_Int      B_ext_ncols  = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
   HYPRE_Int      B_ext_nrows  = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
   HYPRE_Int     *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i     = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_BigInt    *B_int_j     = NULL;
   HYPRE_Complex   *B_int_data  = NULL;
   HYPRE_Int        B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;

   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i;
   HYPRE_Int num_procs;
   void    **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   for (i = 0; i < B_ext_nrows; i++)
   {
      B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
   }

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec)
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);

   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
   }

   /* Build a comm pkg with the roles of sends and recvs swapped relative to
    * comm_pkg_A, since the rows travel in the reverse direction. */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* must complete the rownnz exchange before building row pointers from it */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs (prefix sum over received counts)
    *--------------------------------------------------------------------------*/
   B_int_i[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i[i] += B_int_i[i-1];
   }

   B_int_nnz = B_int_i[B_int_nrows];

   B_int_j    = hypre_TAlloc(HYPRE_BigInt,  B_int_nnz, HYPRE_MEMORY_HOST);
   B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows: mode 1 for complex data, mode 21 for big-int
    * column indices; both complete later in the Wait routine */
   comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);

   /* create CSR: wraps the arrays currently being filled by the exchange */
   B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_int)    = B_int_i;
   hypre_CSRMatrixBigJ(B_int) = B_int_j;
   hypre_CSRMatrixData(B_int) = B_int_data;

   /* output: bundle everything the Wait routine needs */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;

   hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ExchangeExternalRowsWait
 *
 * Complete the exchange started by hypre_ExchangeExternalRowsInit and
 * return the assembled B_int matrix.  Frees the temporary comm pkg and
 * the request bundle.
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *B_int         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_a);
   hypre_ParCSRCommHandleDestroy(comm_handle_j);

   /* only the jdata start arrays were allocated for comm_pkg_j; the proc
    * lists are borrowed from comm_pkg_A and must not be freed here */
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractSubmatrixFC
 *
 * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
 * char job[2] = "FF", "FC", "CF" or "CC"
 *
 * Rows/cols marked negative in CF_marker are F points, positive are C
 * points.  Entries whose magnitude is <= strength_thresh times the row's
 * max off-diagonal magnitude are dropped (the first diag entry, assumed
 * to be the diagonal, is always kept).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix  *A,
                                      HYPRE_Int           *CF_marker,
                                      HYPRE_BigInt        *cpts_starts_in,
                                      const char          *job,
                                      hypre_ParCSRMatrix **B_ptr,
                                      HYPRE_Real           strength_thresh)
{
   MPI_Comm                comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix    *B_diag, *B_offd;
   HYPRE_Real         *B_maxel_row;
   HYPRE_Int          *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
   HYPRE_Complex      *B_diag_a, *B_offd_a;
   HYPRE_Int           num_cols_B_offd;
   HYPRE_BigInt       *col_map_offd_B;

   HYPRE_Int    i, j, k, k1, k2;
   HYPRE_BigInt B_nrow_global, B_ncol_global;
   HYPRE_Int    A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd;
   HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts;
   HYPRE_Int    nf_local, nc_local;
   HYPRE_Int    row_set, col_set;
   HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col;
   HYPRE_Int    my_id, num_procs, *sub_idx_diag, *sub_idx_offd;
   HYPRE_Int    num_sends, *send_buf_data;

   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* -1 selects F points, +1 selects C points */
   row_set = job[0] == 'F' ? -1 : 1;
   col_set = job[1] == 'F' ? -1 : 1;

   A_nlocal = hypre_CSRMatrixNumRows(A_diag);

   /*-------------- global number of C points and local C points
    *               assuming cpts_starts is given */
   if (row_set == 1 || col_set == 1)
   {
      /* copy cpts_starts first */
      HYPRE_Int len;
      len = 2;
      cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
      memcpy(cpts_starts, cpts_starts_in, len*sizeof(HYPRE_BigInt));

      if (my_id == (num_procs -1))
      {
         total_global_cpts = cpts_starts[1];
      }
      /* NOTE(review): total_global_cpts is HYPRE_BigInt but is broadcast as
       * HYPRE_MPI_INT — looks like a type mismatch in big-int builds; confirm */
      hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
      nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
   }

   /*-------------- global number of F points, local F points, and F starts */
   if (row_set == -1 || col_set == -1)
   {
      nf_local = 0;
      for (i = 0; i < A_nlocal; i++)
      {
         if (CF_marker[i] < 0)
         {
            nf_local++;
         }
      }
      fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      /* NOTE(review): nf_local is HYPRE_Int but the Scan datatype is
       * HYPRE_MPI_BIG_INT — send/recv type mismatch when the two widths
       * differ; confirm against hypre's mixed-int configuration */
      hypre_MPI_Scan(&nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      fpts_starts[0] = fpts_starts[1] - nf_local;
      if (my_id == num_procs - 1)
      {
         total_global_fpts = fpts_starts[1];
      }
      /* NOTE(review): same HYPRE_BigInt-vs-HYPRE_MPI_INT concern as above */
      hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
   }

   if (row_set == -1 && col_set == -1)
   {
      /* FF */
      B_nrow_local = nf_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = B_col_starts = fpts_starts;
   }
   else if (row_set == -1 && col_set == 1)
   {
      /* FC */
      B_nrow_local = nf_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = fpts_starts;
      B_col_starts = cpts_starts;
   }
   else if (row_set == 1 && col_set == -1)
   {
      /* CF */
      B_nrow_local = nc_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = cpts_starts;
      B_col_starts = fpts_starts;
   }
   else
   {
      /* CC */
      B_nrow_local = nc_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = B_col_starts = cpts_starts;
   }

   /* global index of my first col */
   B_first_col = B_col_starts[0];

   /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
   sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i == col_set)
      {
         sub_idx_diag[i] = k++;
      }
      else
      {
         sub_idx_diag[i] = -1;
      }
   }
   hypre_assert(k == B_ncol_local);

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   k = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
      /* loop through all elems to send_proc[i] */
      for (j = si; j < ei; j++)
      {
         /* j1: local idx */
         HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         if (j1 != -1)
         {
            /* adjust j1 to B global idx */
            j1 += B_first_col;
         }
         send_buf_data[k++] = j1;
      }
   }
   hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

   /* recv buffer */
   sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* count B's offd columns: the A offd columns whose owners selected them */
   for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         num_cols_B_offd ++;
      }
   }
   col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         /* received value is B's global col index; remap sub_idx_offd to
          * B's local offd column numbering */
         col_map_offd_B[k] = sub_idx_offd[i];
         sub_idx_offd[i] = k++;
      }
   }
   hypre_assert(k == num_cols_B_offd);

   /* count nnz and set ia */
   B_nnz_diag = B_nnz_offd = 0;
   B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
   B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_diag_i[0] = B_offd_i[0] = 0;

   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      k++;

      // Get max abs-value element of this row
      HYPRE_Real temp_max = 0;
      if (strength_thresh > 0)
      {
         /* diag scan starts at A_diag_i[i]+1: entry 0 is the diagonal */
         for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
         {
            if (hypre_cabs(A_diag_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_diag_a[j]);
            }
         }
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            if (hypre_cabs(A_offd_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_offd_a[j]);
            }
         }
      }
      B_maxel_row[k-1] = temp_max;

      // add one for diagonal element
      j = A_diag_i[i];
      if (sub_idx_diag[A_diag_j[j]] != -1)
      {
         B_nnz_diag++;
      }

      // Count nnzs larger than tolerance times max row element
      for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
      {
         if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_diag++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_offd++;
         }
      }
      B_diag_i[k] = B_nnz_diag;
      B_offd_i[k] = B_nnz_offd;
   }
   hypre_assert(k == B_nrow_local);

   B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST);
   B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);

   /* second pass: fill values, with the same selection/drop rules as the
    * counting pass so k1/k2 land exactly on the counted totals */
   for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      HYPRE_Real maxel = B_maxel_row[k];
      k++;
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
         if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) )
         {
            B_diag_j[k1] = j1;
            B_diag_a[k1] = A_diag_a[j];
            k1++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
         if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel)))
         {
            hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
            B_offd_j[k2] = j1;
            B_offd_a[k2] = A_offd_a[j];
            k2++;
         }
      }
   }
   hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);

   /* ready to create B = A(rowset, colset) */
   B = hypre_ParCSRMatrixCreate(comm,
                                B_nrow_global,
                                B_ncol_global,
                                B_row_starts,
                                B_col_starts,
                                num_cols_B_offd,
                                B_nnz_diag,
                                B_nnz_offd);

   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_diag) = B_diag_a;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;

   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_offd) = B_offd_a;
   hypre_CSRMatrixI(B_offd) = B_offd_i;
   hypre_CSRMatrixJ(B_offd) = B_offd_j;

   hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;

   hypre_ParCSRMatrixSetNumNonzeros(B);
   hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);

   hypre_MatvecCommPkgCreate(B);

   *B_ptr = B;

   hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
   hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
sharp_utils.c
#include "sharp_utils.h"
#include <immintrin.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>

/* Tiny int helpers (non-static on purpose; presumably used elsewhere too). */
int min(int a, int b) { return a < b ? a : b; }
int max(int a, int b) { return a > b ? a : b; }

/* Cross-spectrum from two sets of harmonic coefficients, single precision.
 *
 * Appears to compute cl[l] = 2/(2l+1) * sum_m Re(alm1_lm * conj(alm2_lm)),
 * with the m=0 term halved inside the buffer so the final factor of 2 gives
 * it unit weight.  Coefficients are interleaved (re,im) pairs addressed via
 * ainfo->mvstart.
 *
 * NOTE(review): the m-loop indexes mvstart[m] and starts l at m, i.e. it
 * assumes ainfo->mval[m] == m; the disabled variants below consult
 * ainfo->mval explicitly — confirm the alm layouts passed here satisfy this.
 * NOTE(review): calloc result is not checked for NULL.
 */
void alm2cl_sp(sharp_alm_info * ainfo, float * alm1, float * alm2, float * cl)
{
	int nthread = omp_get_max_threads();
	int nl = ainfo->lmax+1;
	/* per-thread partial sums: thread id owns buf[nl*id .. nl*id+nl-1],
	 * so the parallel region below is race-free without atomics */
	float * buf = calloc(nthread*nl, sizeof(float));
	#pragma omp parallel
	{
		int id = omp_get_thread_num();
		/* thread 0 handles m=0 (real-only, half weight) while the others
		 * proceed straight to the worksharing loop; safe because each
		 * thread writes only its own buf slice */
		if(id == 0) {
			for(int l = 0; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
				buf[nl*id+l] = alm1[i]*alm2[i]/2;
			}
		}
		#pragma omp for schedule(dynamic)
		for(int m = 1; m < ainfo->nm; m++) {
			for(int l = m; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
				buf[nl*id+l] += alm1[i]*alm2[i] + alm1[i+1]*alm2[i+1];
			}
		}
		/* NOTE(review): redundant — the omp for above already ends with an
		 * implicit barrier; harmless */
		#pragma omp barrier
		/* reduce the per-thread buffers into cl and normalize */
		#pragma omp for
		for(int l = 0; l < nl; l++) {
			cl[l] = 0;
			for(int i = 0; i < nthread; i++)
				cl[l] += buf[nl*i+l];
			cl[l] *= 2.0/(2*l+1);
		}
	}
	free(buf);
}

/* Double-precision twin of alm2cl_sp; identical structure and caveats. */
void alm2cl_dp(sharp_alm_info * ainfo, double * alm1, double * alm2, double * cl)
{
	int nthread = omp_get_max_threads();
	int nl = ainfo->lmax+1;
	double * buf = calloc(nthread*nl, sizeof(double));
	#pragma omp parallel
	{
		int id = omp_get_thread_num();
		if(id == 0) {
			for(int l = 0; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
				buf[nl*id+l] = alm1[i]*alm2[i]/2;
			}
		}
		#pragma omp for schedule(dynamic)
		for(int m = 1; m < ainfo->nm; m++) {
			for(int l = m; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
				buf[nl*id+l] += alm1[i]*alm2[i] + alm1[i+1]*alm2[i+1];
			}
		}
		#pragma omp barrier
		#pragma omp for
		for(int l = 0; l < nl; l++) {
			cl[l] = 0;
			for(int i = 0; i < nthread; i++)
				cl[l] += buf[nl*i+l];
			cl[l] *= 2.0/(2*l+1);
		}
	}
	free(buf);
}

/* The variants below are compiled out; kept as development history for the
 * auto-spectrum (single-alm) case. */
#if 0
void alm2cl_plain_sp(sharp_alm_info * ainfo, float * alm, float * cl)
{
	// Straightforward implementation. The baseline we will try to improve on.
	// We don't support stride or packing.
	for(int l = 0; l <= ainfo->lmax; l++) {
		ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
		float c = alm[i]*alm[i];
		for(int mi = 1; mi < ainfo->nm; mi++) {
			if(ainfo->mval[mi] > l) continue;
			i = ainfo->mvstart[mi]*2 + l*2;
			c += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
		}
		cl[l] = c/(2*l+1);
	}
}

void alm2cl_mmajor_sp(sharp_alm_info * ainfo, float * alm, float * cl)
{
	// m-major traversal: better locality along each m's coefficient run.
	for(int l = 0; l <= ainfo->lmax; l++) {
		ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
		cl[l] = alm[i]*alm[i];
	}
	for(int mi = 1; mi < ainfo->nm; mi++) {
		int m = ainfo->mval[mi];
		for(int l = m; l <= ainfo->lmax; l++) {
			ptrdiff_t i = ainfo->mvstart[mi]*2 + l*2;
			cl[l] += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
		}
	}
	for(int l = 0; l <= ainfo->lmax; l++) {
		cl[l] /= (2*l+1);
	}
}

void alm2cl_mmajor2_sp(sharp_alm_info * ainfo, float * alm, float * cl)
{
	// Threaded m-major version with per-thread accumulation buffers.
	int nthread = omp_get_max_threads();
	int nl = ainfo->lmax+1;
	float * buf = calloc(nthread*nl, sizeof(float));
	#pragma omp parallel
	{
		int id = omp_get_thread_num();
		if(id == 0) {
			for(int l = 0; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
				buf[nl*id+l] = alm[i]*alm[i];
			}
		}
		#pragma omp for schedule(dynamic)
		for(int m = 1; m < ainfo->nm; m++) {
			for(int l = m; l <= ainfo->lmax; l++) {
				ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
				/* NOTE(review): dead branch — the loop starts at m=1 */
				if(m == 0) buf[nl*id+l] += alm[i]*alm[i];
				else       buf[nl*id+l] += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
			}
		}
	}
	#pragma omp parallel for
	for(int l = 0; l < nl; l++) {
		cl[l] = 0;
		for(int i = 0; i < nthread; i++)
			cl[l] += buf[nl*i+l];
		cl[l] /= 2*l+1;
	}
	free(buf);
}

void alm2cl_avx_sp(sharp_alm_info * ainfo, float * alm, float * cl)
{
	// AVX verison. Operate on 8 ls at a time. Simple l-major one first.
	// In theory this should be safe, as we're working on units larger than a
	// cache line. Fast operations require 32-byte alignment, but it's hard to
	// ensure that. That means that this won't be much faster than the automatic
	// version. I measure it to be 17% faster.
	#pragma omp parallel for schedule(dynamic)
	for(int l1 = 0; l1 <= ainfo->lmax; l1+=8) {
		int l2 = min(l1+8, ainfo->lmax+1);
		int nm_block;
		if(l2 == l1+8) {
			// We have a whole block worth of ls. Handle all our ls up to nm_block
			nm_block = min(l1+1, ainfo->nm);
			__m256 a1, a2, c = _mm256_setzero_ps(), fact, tmp;
			__m256i permutation = _mm256_set_epi32(7,6,3,2,5,4,1,0);
			for(int m = 0; m < nm_block; m++) {
				fact = _mm256_set1_ps(m?2:1);
				// Load our alms
				ptrdiff_t i = ainfo->mvstart[m]*2 + l1*2;
				a1 = _mm256_loadu_ps(alm + i + 0);
				a2 = _mm256_loadu_ps(alm + i + 8);
				a1 *= a1;
				a2 *= a2;
				tmp = _mm256_hadd_ps(a1, a2);
				// At this point we have 76325410, but we want 76543210
				tmp = _mm256_permutevar8x32_ps(tmp, permutation);
				c += tmp*fact;
			}
			_mm256_storeu_ps(cl + l1, c);
		} else {
			// We're at the end of the ls, so we can't do blocks. This will
			// be handled by the general case below, but before that we need
			// to zero-initialize our values. We don't handle any ms in this case,
			nm_block = 0;
			for(int l = l1; l < l2; l++) cl[l] = 0;
		}
		// General case
		for(int m = nm_block; m < ainfo->nm; m++) {
			for(int l = max(m,l1); l < l2; l++) {
				ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
				cl[l] += (1+(m>0))*(alm[i]*alm[i]+alm[i+1]*alm[i+1]);
			}
		}
	}
	// And normalize
	for(int l = 0; l <= ainfo->lmax; l++) cl[l] /= 2*l+1;
}
#endif
common.c
#include "common.h"
/* contains routines common to direct and indirect sparse solvers */

#define MIN_SCALE (1e-3)
#define MAX_SCALE (1e3)
#define NUM_SCALE_PASSES 1 /* additional passes don't help much */

/* Deep-copies the column-compressed matrix `src` into fresh storage.
 * On success stores the new matrix in *dstp and returns 1.
 * On allocation failure returns 0 with *dstp untouched; any partially
 * allocated storage is released (previously this leaked `A` and the
 * arrays that had been allocated before the failing one). */
scs_int copy_a_matrix(ScsMatrix **dstp, const ScsMatrix *src) {
  scs_int Anz = src->p[src->n];
  ScsMatrix *A = scs_calloc(1, sizeof(ScsMatrix));
  if (!A) {
    return 0;
  }
  A->n = src->n;
  A->m = src->m;
  A->x = scs_malloc(sizeof(scs_float) * Anz); /* A values, size: NNZ A */
  A->i = scs_malloc(sizeof(scs_int) * Anz);   /* A row index, size: NNZ A */
  A->p = scs_malloc(sizeof(scs_int) * (src->n + 1)); /* A column pointer, size: n+1 */
  if (!A->x || !A->i || !A->p) {
    /* free whatever was allocated before the failure (guards match
     * free_a_matrix's style in case scs_free is not NULL-safe) */
    if (A->x) {
      scs_free(A->x);
    }
    if (A->i) {
      scs_free(A->i);
    }
    if (A->p) {
      scs_free(A->p);
    }
    scs_free(A);
    return 0;
  }
  memcpy(A->x, src->x, sizeof(scs_float) * Anz);
  memcpy(A->i, src->i, sizeof(scs_int) * Anz);
  memcpy(A->p, src->p, sizeof(scs_int) * (src->n + 1));
  *dstp = A;
  return 1;
}

/* Sanity-checks A's CSC structure: arrays present, column pointers
 * non-decreasing, nnz within [1, m*n], and row indices within bounds.
 * Returns 0 if valid, -1 otherwise (empty columns only warn). */
scs_int validate_lin_sys(const ScsMatrix *A) {
  scs_int i, r_max, Anz;
  if (!A->x || !A->i || !A->p) {
    scs_printf("data incompletely specified\n");
    return -1;
  }
  /* detects some errors in A col ptrs: */
  for (i = 0; i < A->n; ++i) {
    if (A->p[i] == A->p[i + 1]) {
      scs_printf("WARN: A->p (column pointers) not strictly increasing, "
                 "column %li empty\n",
                 (long)i);
    } else if (A->p[i] > A->p[i + 1]) {
      scs_printf("ERROR: A->p (column pointers) decreasing\n");
      return -1;
    }
  }
  Anz = A->p[A->n];
  /* float division avoids overflow of m*n for large dimensions */
  if (((scs_float)Anz / A->m > A->n) || (Anz <= 0)) {
    scs_printf("Anz (nonzeros in A) = %li, outside of valid range\n",
               (long)Anz);
    return -1;
  }
  r_max = 0;
  for (i = 0; i < Anz; ++i) {
    if (A->i[i] > r_max) {
      r_max = A->i[i];
    }
  }
  if (r_max > A->m - 1) {
    scs_printf("number of rows in A inconsistent with input dimension\n");
    return -1;
  }
  return 0;
}

/* Frees a matrix created by copy_a_matrix (tolerates NULL members). */
void free_a_matrix(ScsMatrix *A) {
  if (A->x) {
    scs_free(A->x);
  }
  if (A->i) {
    scs_free(A->i);
  }
  if (A->p) {
    scs_free(A->p);
  }
  scs_free(A);
}

/* Debug dump of A column by column, with per-column and total norms. */
void print_a_matrix(const ScsMatrix *A) {
  scs_int i, j;
  /* TODO: this is to prevent clogging stdout */
  if (A->p[A->n] < 2500) {
    scs_printf("\n");
    for (i = 0; i < A->n; ++i) {
      scs_printf("Col %li: ", (long)i);
      for (j = A->p[i]; j < A->p[i + 1]; j++) {
        scs_printf("A[%li,%li] = %4f, ", (long)A->i[j], (long)i, A->x[j]);
      }
      scs_printf("norm col = %4f\n",
                 calc_norm(&(A->x[A->p[i]]), A->p[i + 1] - A->p[i]));
    }
    scs_printf("norm A = %4f\n", calc_norm(A->x, A->p[A->n]));
  }
}

/* Equilibrates A in place (Ruiz-style row/column scaling), averaging row
 * norms within each cone block so cone membership is preserved.  The
 * cumulative scalings are returned in scal->D (rows) and scal->E (cols),
 * along with the mean row/column norms of the scaled matrix.
 * NOTE(review): the scs_malloc/scs_calloc results here are not checked for
 * NULL before use — confirm upstream guarantees or add checks. */
void normalize_a(ScsMatrix *A, const ScsSettings *stgs, const ScsCone *k,
                 ScsScaling *scal) {
  scs_float *D = scs_malloc(A->m * sizeof(scs_float));
  scs_float *E = scs_malloc(A->n * sizeof(scs_float));
  scs_float *Dt = scs_malloc(A->m * sizeof(scs_float));
  scs_float *Et = scs_malloc(A->n * sizeof(scs_float));
  scs_float *nms = scs_calloc(A->m, sizeof(scs_float));
  scs_float min_row_scale = MIN_SCALE * SQRTF((scs_float)A->n),
            max_row_scale = MAX_SCALE * SQRTF((scs_float)A->n);
  scs_float min_col_scale = MIN_SCALE * SQRTF((scs_float)A->m),
            max_col_scale = MAX_SCALE * SQRTF((scs_float)A->m);
  scs_int i, j, l, count, delta, *boundaries, c1, c2;
  scs_float wrk, e;
  scs_int num_boundaries = get_cone_boundaries(k, &boundaries);
#if EXTRA_VERBOSE > 0
  timer normalize_timer;
  scs_tic(&normalize_timer);
  scs_printf("normalizing A\n");
  print_a_matrix(A);
#endif
  for (l = 0; l < NUM_SCALE_PASSES; ++l) {
    memset(D, 0, A->m * sizeof(scs_float));
    memset(E, 0, A->n * sizeof(scs_float));
    /* calculate row norms */
    for (i = 0; i < A->n; ++i) {
      c1 = A->p[i];
      c2 = A->p[i + 1];
      for (j = c1; j < c2; ++j) {
        wrk = A->x[j];
        D[A->i[j]] += wrk * wrk;
      }
    }
    for (i = 0; i < A->m; ++i) {
      D[i] = SQRTF(D[i]); /* just the norms */
    }
    /* mean of norms of rows across each cone */
    count = boundaries[0];
    for (i = 1; i < num_boundaries; ++i) {
      wrk = 0;
      delta = boundaries[i];
      for (j = count; j < count + delta; ++j) {
        wrk += D[j];
      }
      wrk /= delta;
      for (j = count; j < count + delta; ++j) {
        D[j] = wrk;
      }
      count += delta;
    }
    /* clamp: tiny norms get scale 1 (no-op), large norms are capped.
     * NOTE(review): the asymmetry (1 vs max_row_scale) is presumably
     * deliberate — a near-zero row shouldn't be amplified */
    for (i = 0; i < A->m; ++i) {
      if (D[i] < min_row_scale) {
        D[i] = 1;
      } else if (D[i] > max_row_scale) {
        D[i] = max_row_scale;
      }
    }
    /* scale the rows with D */
    for (i = 0; i < A->n; ++i) {
      for (j = A->p[i]; j < A->p[i + 1]; ++j) {
        A->x[j] /= D[A->i[j]];
      }
    }
    /* calculate and scale by col norms, E */
    for (i = 0; i < A->n; ++i) {
      c1 = A->p[i + 1] - A->p[i];
      e = calc_norm(&(A->x[A->p[i]]), c1);
      if (e < min_col_scale) {
        e = 1;
      } else if (e > max_col_scale) {
        e = max_col_scale;
      }
      scale_array(&(A->x[A->p[i]]), 1.0 / e, c1);
      E[i] = e;
    }
    /* accumulate cumulative scalings across passes */
    for (i = 0; i < A->m; ++i) {
      Dt[i] = (l == 0) ? D[i] : Dt[i] * D[i];
    }
    for (i = 0; i < A->n; ++i) {
      Et[i] = (l == 0) ? E[i] : Et[i] * E[i];
    }
  }
  scs_free(boundaries);
  scs_free(D);
  scs_free(E);
  /* calculate mean of row norms of A */
  for (i = 0; i < A->n; ++i) {
    for (j = A->p[i]; j < A->p[i + 1]; ++j) {
      wrk = A->x[j];
      nms[A->i[j]] += wrk * wrk;
    }
  }
  scal->mean_norm_row_a = 0.0;
  for (i = 0; i < A->m; ++i) {
    scal->mean_norm_row_a += SQRTF(nms[i]) / A->m;
  }
  scs_free(nms);
  /* calculate mean of col norms of A */
  scal->mean_norm_col_a = 0.0;
  for (i = 0; i < A->n; ++i) {
    c1 = A->p[i + 1] - A->p[i];
    scal->mean_norm_col_a += calc_norm(&(A->x[A->p[i]]), c1) / A->n;
  }
  /* scale up by d->SCALE if not equal to 1 */
  if (stgs->scale != 1) {
    scale_array(A->x, stgs->scale, A->p[A->n]);
  }
  scal->D = Dt;
  scal->E = Et;
#if EXTRA_VERBOSE > 0
  scs_printf("finished normalizing A, time: %1.2es\n",
             tocq(&normalize_timer) / 1e3);
  print_a_matrix(A);
#endif
}

/* Undoes normalize_a in place using the stored scalings (and stgs->scale). */
void un_normalize_a(ScsMatrix *A, const ScsSettings *stgs,
                    const ScsScaling *scal) {
  scs_int i, j;
  scs_float *D = scal->D;
  scs_float *E = scal->E;
  for (i = 0; i < A->n; ++i) {
    scale_array(&(A->x[A->p[i]]), E[i] / stgs->scale, A->p[i + 1] - A->p[i]);
  }
  for (i = 0; i < A->n; ++i) {
    for (j = A->p[i]; j < A->p[i + 1]; ++j) {
      A->x[j] *= D[A->i[j]];
    }
  }
}

/* y += A'*x, A in column compressed format.
 * Parallelizes over columns (rows of A'); each j writes only y[j], so no
 * synchronization is needed. */
void _accum_by_atrans(scs_int n, scs_float *Ax, scs_int *Ai, scs_int *Ap,
                      const scs_float *x, scs_float *y) {
  scs_int p, j;
  scs_int c1, c2;
  scs_float yj;
#if EXTRA_VERBOSE > 0
  timer mult_by_atrans_timer;
  scs_tic(&mult_by_atrans_timer);
#endif
#ifdef _OPENMP
#pragma omp parallel for private(p, c1, c2, yj)
#endif
  for (j = 0; j < n; j++) {
    yj = y[j];
    c1 = Ap[j];
    c2 = Ap[j + 1];
    for (p = c1; p < c2; p++) {
      yj += Ax[p] * x[Ai[p]];
    }
    y[j] = yj;
  }
#if EXTRA_VERBOSE > 0
  scs_printf("mult By A trans time: %1.2es\n",
             tocq(&mult_by_atrans_timer) / 1e3);
#endif
}

/* y += A*x, A in column compressed format.
 * Kept serial: a column-parallel version would need atomics on y[Ai[p]]
 * (see the commented-out pragmas), which was evidently not worth it. */
void _accum_by_a(scs_int n, scs_float *Ax, scs_int *Ai, scs_int *Ap,
                 const scs_float *x, scs_float *y) {
  scs_int p, j;
  scs_int c1, c2;
  scs_float xj;
#if EXTRA_VERBOSE > 0
  timer mult_by_a_timer;
  scs_tic(&mult_by_a_timer);
#endif
  /*#pragma omp parallel for private(p,c1,c2,xj) */
  for (j = 0; j < n; j++) {
    xj = x[j];
    c1 = Ap[j];
    c2 = Ap[j + 1];
    for (p = c1; p < c2; p++) {
      /*#pragma omp atomic */
      y[Ai[p]] += Ax[p] * xj;
    }
  }
#if EXTRA_VERBOSE > 0
  scs_printf("mult By A time: %1.2es\n", tocq(&mult_by_a_timer) / 1e3);
#endif
}
zunglq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_unglq
 *
 *  Generates an m-by-n matrix Q with orthonormal rows, which is
 *  defined as the first m rows of a product of the elementary reflectors
 *  returned by plasma_zgelqf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix Q. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix Q. n >= m.
 *
 * @param[in] k
 *          The number of rows of elementary tile reflectors whose product
 *          defines the matrix Q.
 *          m >= k >= 0.
 *
 * @param[in] pA
 *          Details of the LQ factorization of the original matrix A as returned
 *          by plasma_zgelqf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[out] pQ
 *          On exit, pointer to the m-by-n matrix Q.
 *
 * @param[in] ldq
 *          The leading dimension of the array Q. ldq >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zunglq
 * @sa plasma_cunglq
 * @sa plasma_dorglq
 * @sa plasma_sorglq
 * @sa plasma_zgelqf
 *
 ******************************************************************************/
int plasma_zunglq(int m, int n, int k,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex64_t *pQ, int ldq)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (error code == -(argument position)).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (k < 0 || k > m) {
        plasma_error("illegal value of k");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldq < imax(1, m)) {
        plasma_error("illegal value of ldq");
        return -8;
    }

    // quick return
    if (m <= 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t Q;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, k, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // NOTE(review): Q is created with the same k-by-n submatrix view as A;
    // confirm against the reference zunglq, which views Q as m-by-n.
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, k, n, &Q);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: the tile descriptors were leaked on this error path.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // FIX: the return value was silently ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // FIX: the return value was silently ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pQ, ldq, Q, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zunglq(A, T, Q, work, &sequence, &request);

        // Translate Q back to LAPACK layout.
        plasma_omp_zdesc2ge(Q, pQ, ldq, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&Q);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_unglq
 *
 *  Non-blocking tile version of plasma_zunglq().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[out] Q
 *          Descriptor of matrix Q. On exit, matrix Q stored in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zunglq
 * @sa plasma_omp_cunglq
 * @sa plasma_omp_dorglq
 * @sa plasma_omp_sorglq
 * @sa plasma_omp_zgelqf
 *
 ******************************************************************************/
void plasma_omp_zunglq(plasma_desc_t A, plasma_desc_t T, plasma_desc_t Q,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // FIX: check sequence and request before the descriptor checks below;
    // the original ran plasma_request_fail(sequence, ...) first, which
    // dereferences a possibly-NULL sequence.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        // Cannot record the failure without a sequence to record it in.
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Q) != PlasmaSuccess) {
        plasma_error("invalid Q");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (Q.m <= 0)
        return;

    // Set Q to identity.
    plasma_pzlaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request);

    // Construct Q.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzunglq_tree(A, T, Q, work, sequence, request);
    }
    else {
        plasma_pzunglq(A, T, Q, work, sequence, request);
    }
}
pr81887.c
/* PR c/81887 */
/* { dg-do compile } */
/* { dg-options "-fno-openmp -fopenmp-simd -fdump-tree-gimple" } */
/* { dg-final { scan-tree-dump-times "#pragma omp simd" 2 "gimple" } } */
/* { dg-final { scan-tree-dump-times "#pragma omp ordered simd\[ \t]*\[\n\r]" 2 "gimple" } } */
/* { dg-final { scan-tree-dump-times "#pragma omp" 4 "gimple" } } */

/* Under -fno-openmp -fopenmp-simd only simd-related OpenMP constructs are
   honored; the dg-final counts above check that exactly four "#pragma omp"
   directives (the two "simd" loops and the two "ordered simd" regions from
   f1 and f2) survive into the gimple dump, while the non-simd ordered
   constructs in f3/f4/f5 do not.  */

/* "ordered simd" inside a plain "simd" loop: both pragmas are kept.  */
void
f1 (int *x)
{
  int i;
  #pragma omp simd
  for (i = 0; i < 100; i++)
    #pragma omp ordered simd
    x[i / 2] = i;
}

/* Combined parallel-for-simd with "ordered threads simd": only the simd
   parts survive -fno-openmp.  */
void
f2 (int *x)
{
  int i;
  #pragma omp parallel for simd ordered
  for (i = 0; i < 100; i++)
    #pragma omp ordered threads simd
    x[i / 2] = i;
}

/* Plain (non-simd) ordered construct: discarded under -fno-openmp.  */
void
f3 (int *x)
{
  int i;
  #pragma omp parallel for ordered
  for (i = 0; i < 100; i++)
    #pragma omp ordered
    x[i / 2] = i;
}

/* "ordered threads" (non-simd): likewise discarded.  */
void
f4 (int *x)
{
  int i;
  #pragma omp parallel for ordered
  for (i = 0; i < 100; i++)
    #pragma omp ordered threads
    x[i / 2] = i;
}

/* Doacross loop with depend(sink)/depend(source) clauses: non-simd, so
   nothing from it is expected in the gimple dump either.  */
void
f5 (int n, int ***x)
{
  int i, j, k;
  #pragma omp parallel for ordered(3)
  for (i=0; i < n; i++)
    for (j=0; j < n; ++j)
      for (k=0; k < n; ++k)
	{
	  #pragma omp ordered depend(sink:i-8,j-2,k+2) depend(sink:i, j-1,k) depend(sink:i-4,j-3,k+6) depend(sink:i-6,j-4,k-6)
	  x[i][j][k] = i + j + k;
	  #pragma omp ordered depend(source)
	}
}
HelloWorld.c
/*
 * HelloWorld.cpp
 *
 *  Created on: 22 August 2016
 *      Author: Dr. Corneliu Arsene
 *
 * This software implements the Canonical Variates Analysis method
 * (libhelloworld.jnilib file) for image processing: it reads the
 * multispectral images from the same directory as where the
 * libhelloworld.jnilib file is located and then produces a number of
 * improved colour images in comparison to the original multispectral
 * images.
 */

#include "jni.h"
#include <stdio.h>
#include "HelloWorld.h"
#include <math.h>
#include <assert.h>
#include <dirent.h>
#include </usr/local/Cellar/libiomp/20150701/include/libiomp/omp.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <time.h>
#include <string.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <iostream>
#include "gsl/matrix/gsl_matrix.h"
#include "gsl/specfunc/gsl_sf_bessel.h"
#include "gsl_math.h"
#include "gsl/gsl_blas.h"
#include <gsl/gsl_vector.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_sort_vector.h>
#include <gsl/eigen/gsl_eigen.h>

using namespace cv;
using namespace std;

// * THIS FILE IS COMPILED TO A LIBRARY WHICH IS CALLED BY A JAVA LIBRARY

// Releases a matrix allocated as `rows` independently new[]'d rows plus a
// row-pointer array (the layout produced by range_map()).  NULL-safe so it
// can be called on pointers that have not been assigned yet.
static void free_matrix(double **m, int rows)
{
    if (m == NULL)
        return;
    for (int r = 0; r < rows; r++)
        delete[] m[r];
    delete[] m;
}

JNIEXPORT void JNICALL Java_HelloWorld_print(JNIEnv *env, jobject thisObj)
{
    int i, j, k;        // Matlab-style shared loop indices (used throughout)

    DIR *dp1, *dp3;
    struct dirent *ep, *ep3;
    char *text;
    char pattern[6];
    char *text2;
    pattern[0] = '\0';
    pattern[1] = '\0';
    pattern[2] = '\0';
    pattern[3] = '\0';
    pattern[4] = '\0';
    pattern[5] = '\0';

    clock_t start, end;
    double cpu_time_used;
    start = clock();

    // * DETECT HOW MANY CLASS TEXT FILES EXIST (E.G. CLASS MANUSCRIPT,
    //   CLASS UNDERWRITING, ETC): count directory entries starting "Class".
    text = "Class";
    dp1 = opendir("./");
    int acindex, newacindex;
    acindex = 0;
    newacindex = 0;
    while ((ep = readdir(dp1)) != NULL) {
        text2 = ep->d_name;
        // d_name is a fixed-size buffer, so copying 5 chars stays in bounds
        // even for short names; short names simply fail the strcmp below.
        pattern[0] = text2[0];
        pattern[1] = text2[1];
        pattern[2] = text2[2];
        pattern[3] = text2[3];
        pattern[4] = text2[4];
        pattern[5] = '\0';
        if (strcmp(text, pattern) == 0) {
            acindex = acindex + 1;
        }
    }
    (void) closedir(dp1);

    // * ONCE I KNOW HOW MANY CLASSES THERE ARE, MEMORIZE THE NAMES OF THE
    //   CLASS FILES.
    char arrayclass[acindex][2024];
    dp3 = opendir("./");
    while ((ep3 = readdir(dp3)) != NULL) {
        text2 = ep3->d_name;
        pattern[0] = text2[0];
        pattern[1] = text2[1];
        pattern[2] = text2[2];
        pattern[3] = text2[3];
        pattern[4] = text2[4];
        pattern[5] = '\0';
        if (strcmp(text, pattern) == 0) {
            strcpy(arrayclass[newacindex], ep3->d_name);
            newacindex = newacindex + 1;
        }
    }
    (void) closedir(dp3);

    // * PREPARE TO READ THE CLASS TEXT FILES DETECTED ABOVE.
    char cwd[1024], cwdtemp[1024];
    getcwd(cwd, sizeof(cwd));
    strcat(cwd, "/");
    FILE *file[acindex];
    for (int j = 0; j < acindex; j = j + 1) {
        strcpy(cwdtemp, cwd);
        strcat(cwdtemp, arrayclass[j]);
        if ((file[j] = fopen(cwdtemp, "r")) == NULL) /* open a text file for reading */
            exit(1);
        printf("%s\n", cwdtemp);
    }

    char str[256], str1[256], str2[256], str3[256], str4[256], str5[256], str6[256];
    double classoverwriting1[1][7];  // scratch row used only while counting
    int i1;
    int nrclass[acindex];

    // * DETECT HOW MANY POINTS EACH CLASS HAS (class manuscript might have
    //   100 points while class overwriting might have 120, etc.).
    for (int j = 0; j < acindex; j = j + 1) {
        // skip the header line
        // FIX: pass the char arrays themselves, not &array (char(*)[256]),
        // which is the wrong type for %s.
        fscanf(file[j], " %s %s %s %s %s %s \n", str, str1, str2, str3, str4, str5);
        i1 = 0;
        // FIX: test the fscanf return value instead of feof(), which
        // counted one extra (failed) read at end of file; also the indices
        // were [1][1..7] on a [1][7] array -- all out of bounds (valid
        // indices are [0][0..6]).
        while (fscanf(file[j], " %lf %lf %lf %lf %lf %lf %lf\n",
                      &classoverwriting1[0][0], &classoverwriting1[0][1],
                      &classoverwriting1[0][2], &classoverwriting1[0][3],
                      &classoverwriting1[0][4], &classoverwriting1[0][5],
                      &classoverwriting1[0][6]) == 7) {
            i1 = i1 + 1;
        }
        nrclass[j] = i1;
    }

    int sumtotalnrclass;
    sumtotalnrclass = 0;
    for (int j = 0; j < acindex; j = j + 1) {
        sumtotalnrclass = sumtotalnrclass + nrclass[j];
    }

    int **classtotal = new int*[sumtotalnrclass];
    for (int i = 0; i < sumtotalnrclass; i++) {
        classtotal[i] = new int[7];
    }
    printf(" \n");
    double testclass1, testclass2;

    // * READ THE POINTS FROM EACH CLASS FILE.  Fields 6 and 7 of every line
    //   hold the (column,row) pixel coordinates, 1-based.
    i1 = 0;
    for (int j = 0; j < acindex; j = j + 1) {
        fseek(file[j], 0, SEEK_SET);
        fscanf(file[j], " %s %s %s %s %s %s \n", str, str1, str2, str3, str4, str5);
        while (fscanf(file[j], " %s %s %s %s %s %s %s\n",
                      str, str1, str2, str3, str4, str5, str6) == 7) {
            testclass1 = atof(str5);
            testclass2 = atof(str6);
            // FIX: was classtotal[i1][6]/[7]; the rows have 7 ints, so the
            // valid indices are 0..6 and [7] wrote past the row.
            classtotal[i1][5] = round(testclass1);
            classtotal[i1][6] = round(testclass2);
            i1 = i1 + 1;
        }
        fclose(file[j]);
    }

    char arraytiff[50][2024];  // maximum number of images is 50

    // * DETECT THE NUMBER OF MULTISPECTRAL IMAGES (there could be 10, 16,
    //   23, etc. ".tif" files).
    DIR *dp2;
    struct dirent *ep2;
    char *text1;
    char pattern1[6];
    char *text22;
    pattern1[0] = '\0';
    pattern1[1] = '\0';
    pattern1[2] = '\0';
    pattern1[3] = '\0';
    pattern1[4] = '\0';
    pattern1[5] = '\0';
    text1 = ".tif";
    dp2 = opendir("./");
    int acindex1, sn1;
    acindex1 = 0;
    while ((ep2 = readdir(dp2)) != NULL) {
        text22 = ep2->d_name;
        sn1 = strlen(text22);
        // FIX: "." and ".." made text22[sn1 - 4] index before the start of
        // the name buffer; skip names shorter than the extension.
        if (sn1 < 4)
            continue;
        pattern1[0] = text22[sn1 - 4];
        pattern1[1] = text22[sn1 - 3];
        pattern1[2] = text22[sn1 - 2];
        pattern1[3] = text22[sn1 - 1];
        pattern1[4] = '\0';
        if (strcmp(text1, pattern1) == 0) {
            strcpy(arraytiff[acindex1], ep2->d_name);
            printf("%s\n", arraytiff[acindex1]);
            acindex1 = acindex1 + 1;
        }
    }

    // Base name of the first image, without its 4-character extension.
    char namefile[40];
    size_t base_len = strlen(arraytiff[0]) - 4;
    strncpy(namefile, arraytiff[0], base_len);
    namefile[base_len] = '\0';   // FIX: strncpy does not NUL-terminate
    printf(" TEST 3333333 \n ");

    // * COPY THE PATH TO THE MULTISPECTRAL IMAGES.
    (void) closedir(dp2);
    char cwd1[1024], cwdtemp1[1024];
    getcwd(cwd1, sizeof(cwd1));
    strcat(cwd1, "/");
    char arrayimage[acindex1][2024];
    printf("%s\n", cwd);
    printf("%s\n", cwd1);
    for (int j = 0; j < acindex1; j = j + 1) {
        strcpy(cwdtemp1, cwd1);
        strcat(cwdtemp1, arraytiff[j]);
        strcpy(arrayimage[j], cwdtemp1);
        printf("%s \n", arrayimage[j]);
    }

    double data_matrix[sumtotalnrclass][acindex1];
    for (int i = 0; i < sumtotalnrclass; i = i + 1) {
        for (int j = 0; j < acindex1; j = j + 1) {
            data_matrix[i][j] = 0;
        }
    }

    // * READ THE IMAGES (grayscale) AND PUT THEM IN various_images.
    std::vector<Mat> image(acindex1);
    std::vector<Mat> various_images;
    for (int i = 0; i < acindex1; i = i + 1) {
        image[i] = imread(arrayimage[i], 0);
        various_images.push_back(image[i]);
    }
    s1 = image[0].rows;   // s1/s2/s1s2 are globals (declared in HelloWorld.h)
    s2 = image[0].cols;
    s1s2 = s1 * s2;
    if (!image[0].data) {  // Check for invalid input
        cout << "Could not open or find the image" << std::endl;
    }

    // NOTE(review): grouping_vector is written but never read afterwards;
    // kept because grp (below) carries the same labels and IS used.
    int *grouping_vector = new int[sumtotalnrclass];
    for (int k = 0; k < sumtotalnrclass; k++) {
        grouping_vector[k] = 0;
    }
    int *grp = new int[sumtotalnrclass];
    for (int k = 0; k < sumtotalnrclass; k++) {
        grp[k] = 0;
    }

    // * FROM THE MULTISPECTRAL IMAGES SELECT THE POINTS LISTED IN THE CLASS
    //   FILES (row = field 7, col = field 6; converted to 0-based).
    int indexonc;
    for (int j = 0; j < acindex1; j++) {
        indexonc = 0;
        for (int k = 0; k < acindex; k++) {
            for (int ss = 0; ss < nrclass[k]; ss++) {
                // (indices follow the classtotal[..][5]/[6] fix above)
                data_matrix[indexonc][j] = (double)
                    various_images[j].at<uchar>(classtotal[indexonc][6] - 1,
                                                classtotal[indexonc][5] - 1);
                indexonc = indexonc + 1;
            }
        }
    }

    // * LABEL EACH POINT WITH ITS (1-based) CLASS NUMBER.
    indexonc = 0;
    for (int k = 0; k < acindex; k = k + 1) {
        for (int ss = 0; ss < nrclass[k]; ss = ss + 1) {
            grouping_vector[indexonc] = 1 + k;
            grp[indexonc] = 1 + k;
            indexonc = indexonc + 1;
        }
    }
    printf(" \n ");

    double X[sumtotalnrclass][acindex1];
    for (int i = 0; i < sumtotalnrclass; i++) {
        for (int j = 0; j < acindex1; j++) {
            X[i][j] = data_matrix[i][j];
        }
    }

    int N, D, K;
    float alpha, xmg[acindex1], xmk[acindex][acindex1];
    int index;
    float xdiff[sumtotalnrclass], transxdiff[sumtotalnrclass][1];
    float sum[acindex1][acindex1];

    // * CANONICAL VARIATES ANALYSIS, PORTED FROM THE MATLAB VERSION.
    N = sumtotalnrclass;   // number of points
    D = acindex1;          // number of bands
    for (j = 0; j < acindex1; j = j + 1) {
        xmg[j] = 0;
    }
    K = grp[0];            // number of classes = largest group label
    for (int j = 1; j < sumtotalnrclass; j++) {
        if (K < grp[j]) {
            K = grp[j];
        }
    }
    index = 0;
    // global mean of each band
    for (j = 0; j < acindex1; j++) {
        alpha = 0;
        for (i = 0; i < sumtotalnrclass; i++) {
            alpha = alpha + X[i][j];
        }
        xmg[j] = alpha / sumtotalnrclass;
    }
    for (i = 0; i < K; i = i + 1) {
        for (j = 0; j < acindex1; j = j + 1) {
            xmk[i][j] = 0;
        }
    }

    float **difference = new float*[acindex1];
    for (int i = 0; i < acindex1; i++) {
        difference[i] = new float[acindex1];
    }
    float **difference1 = new float*[acindex1];
    for (int i = 0; i < acindex1; i++) {
        difference1[i] = new float[acindex1];
    }
    float suma = 0;
    double Xinter[sumtotalnrclass][acindex1];

    // per-class means xmk
    for (k = 1; k < acindex + 1; k++) {
        index = 0;
        for (i = 0; i < sumtotalnrclass; i++) {
            if (grp[i] == k) {
                for (int c = 0; c < acindex1; c++) {
                    Xinter[index][c] = X[i][c];
                }
                index = index + 1;
            }
        }
        for (int c = 0; c < acindex1; c++) {
            suma = 0;
            for (int d = 0; d < nrclass[k - 1]; d++)
                suma = suma + Xinter[d][c];
            xmk[k - 1][c] = suma / nrclass[k - 1];
        }
    }

    float **S = new float*[D];
    for (int i = 0; i < D; i++) {
        S[i] = new float[D];
    }
    float **B = new float*[D];
    for (int i = 0; i < D; i++) {
        B[i] = new float[D];
    }
    float **xsumdiff = new float*[D];
    for (int i = 0; i < D; i++) {
        xsumdiff[i] = new float[D];
    }
    for (i = 0; i < D; i = i + 1)
        for (j = 0; j < D; j = j + 1)
            S[i][j] = 0;
    for (i = 0; i < D; i = i + 1)
        for (j = 0; j < D; j = j + 1)
            xsumdiff[i][j] = 0;

    // within-class scatter S = sum over points of xdiff' * xdiff
    for (int i = 0; i < sumtotalnrclass; i++) {
        for (int j = 0; j < acindex1; j++) {
            index = grp[i];
            xdiff[j] = X[i][j] - xmk[index - 1][j];
        }
        for (int c = 0; c < acindex1; c++) {
            // FIX: was transxdiff[c][1]; the second dimension has size 1,
            // so the only valid index is 0 (the old code wrote one element
            // past the end of the array for the last point).
            transxdiff[c][0] = xdiff[c];
        }
        // multiply xdiff' * xdiff
        for (int c = 0; c < acindex1; c++) {
            for (int d = 0; d < acindex1; d++) {
                xsumdiff[c][d] = transxdiff[c][0] * xdiff[d];
            }
        }
        for (int c = 0; c < acindex1; c++) {
            for (int d = 0; d < acindex1; d++) {
                S[c][d] = S[c][d] + xsumdiff[c][d];
            }
        }
    }
    for (i = 0; i < acindex1; i = i + 1) {
        for (j = 0; j < acindex1; j = j + 1) {
            S[i][j] = S[i][j] / (sumtotalnrclass - acindex);
        }
    }
    printf("\n");

    for (int i = 0; i < D; i = i + 1) {
        for (int j = 0; j < D; j = j + 1) {
            B[i][j] = 0;
        }
    }
    double Xtemp[sumtotalnrclass][acindex1];
    for (i = 0; i < sumtotalnrclass; i++) {
        for (j = 0; j < acindex1; j++)
            Xtemp[i][j] = X[i][j];
    }

    // between-class scatter B
    for (k = 0; k < acindex; k++) {
        for (i = 0; i < acindex1; i++) {
            // FIX: scratch rows [1] -> [0]; only one row is ever needed and
            // [0] is valid regardless of acindex1.
            difference[0][i] = nrclass[k] * (xmk[k][i] - xmg[i]);
            difference1[0][i] = xmk[k][i] - xmg[i];
        }
        for (int d = 0; d < acindex1; d++)
            transxdiff[d][0] = difference[0][d];
        for (int d = 0; d < acindex1; d++) {
            for (int c = 0; c < acindex1; c++) {
                sum[c][d] = transxdiff[c][0] * difference1[0][d];
            }
        }
        for (int c = 0; c < acindex1; c = c + 1) {
            for (int d = 0; d < acindex1; d = d + 1) {
                B[c][d] = B[c][d] + sum[c][d];
            }
            printf(" \n");
        }
        printf(" \n");
    }

    double coefficient;
    double coefficient1;
    coefficient1 = acindex - 1;            // double, so the division below
    coefficient = acindex / coefficient1;  // is floating-point
    for (int c = 0; c < acindex1; c = c + 1) {
        for (int d = 0; d < acindex1; d = d + 1) {
            B[c][d] = coefficient * B[c][d] / sumtotalnrclass;
        }
    }

    // * SOLVE THE GENERALIZED SYMMETRIC EIGENPROBLEM B v = lambda S v USING
    //   THE GSL FUNCTIONS gsl_matrix_alloc, gsl_eigen_gensymmv_alloc, etc.
    gsl_matrix *AA, *SS;
    gsl_vector *eval;
    gsl_matrix *evec;
    gsl_eigen_gensymmv_workspace *w;
    AA = gsl_matrix_alloc(acindex1, acindex1);
    SS = gsl_matrix_alloc(acindex1, acindex1);
    eval = gsl_vector_alloc(acindex1);
    evec = gsl_matrix_alloc(acindex1, acindex1);
    w = gsl_eigen_gensymmv_alloc(acindex1);
    for (int i = 0; i < acindex1; i++)
        for (int j = 0; j < acindex1; j++)
            gsl_matrix_set(AA, i, j, B[i][j]);
    for (int i = 0; i < acindex1; i++)
        for (int j = 0; j < acindex1; j++)
            gsl_matrix_set(SS, i, j, S[i][j]);
    int status;
    gsl_set_error_handler_off();
    // NOTE(review): status is not acted upon on failure (eval/evec are used
    // anyway), matching the original behavior.
    status = gsl_eigen_gensymmv(AA, SS, eval, evec, w);
    (void) status;

    double a;
    int indexa[acindex1];
    for (i = 0; i < acindex1; i++) {
        indexa[i] = i;
    }
    int prst;
    // selection sort: eigenvalues descending, permutation kept in indexa
    for (i = 0; i < acindex1; i++) {
        for (j = i; j < acindex1; j++) {
            if (gsl_vector_get(eval, i) < gsl_vector_get(eval, j)) {
                a = gsl_vector_get(eval, i);
                gsl_vector_set(eval, i, gsl_vector_get(eval, j));
                prst = indexa[i];
                indexa[i] = indexa[j];
                indexa[j] = prst;
                gsl_vector_set(eval, j, a);
            }
        }
    }
    printf(" \n");
    printf(" EIGENVALUES \n ");
    for (i = 0; i < acindex1; i++) {
        printf("%3d %f %i \n", i, gsl_vector_get(eval, i), indexa[i]);
    }

    double summ = 0;
    double Xresult[sumtotalnrclass][acindex1];
    printf(" \n");
    printf(" EIGEN VECTORS \n ");
    for (i = 0; i < acindex1; i++) {
        for (j = 0; j < acindex1; j++)
            printf("%lf ", gsl_matrix_get(evec, i, j));
        printf("\n");
    }
    double coef[acindex1][acindex1];
    for (i = 0; i < acindex1; i++) {
        for (j = 0; j < acindex1; j++)
            coef[i][j] = gsl_matrix_get(evec, i, j);
    }

    // * CALCULATE THE NEW IMAGE POINTS FROM THE EIGENVECTORS (projection of
    //   the selected points onto the canonical variates).
    for (i = 0; i < sumtotalnrclass; i++) {
        for (j = 0; j < acindex1; j++) {
            summ = 0;
            for (k = 0; k < acindex1; k++) {
                summ = summ + Xtemp[i][k] * gsl_matrix_get(evec, k, j);
            }
            Xresult[i][j] = summ;
        }
    }

    double **newmatrix1 = new double*[s1];
    for (int i = 0; i < s1; i++) {
        newmatrix1[i] = new double[s2];
    }
    double **newmatrix2 = new double*[s1];
    for (int i = 0; i < s1; i++) {
        newmatrix2[i] = new double[s2];
    }
    double **newmatrix3 = new double*[s1];
    for (int i = 0; i < s1; i++) {
        newmatrix3[i] = new double[s2];
    }
    for (i = 0; i < s1; i++)
        for (j = 0; j < s2; j++)
            newmatrix1[i][j] = 0;
    for (i = 0; i < s1; i++)
        for (j = 0; j < s2; j++)
            newmatrix2[i][j] = 0;
    for (i = 0; i < s1; i++)
        for (j = 0; j < s2; j++)
            newmatrix3[i][j] = 0;

    double coef1[acindex1][acindex1];
    double score1[sumtotalnrclass][acindex1];
    for (int i = 0; i < sumtotalnrclass; i = i + 1)
        for (int j = 0; j < acindex1; j = j + 1)
            score1[i][j] = 0;
    for (int i = 0; i < acindex1; i = i + 1)
        for (int j = 0; j < acindex1; j = j + 1)
            coef1[i][j] = 0;
    for (int i = 0; i < sumtotalnrclass; i = i + 1)
        for (int j = 0; j < acindex1; j = j + 1)
            score1[i][j] = Xresult[i][j];
    for (int i = 0; i < acindex1; i = i + 1)
        for (int j = 0; j < acindex1; j = j + 1)
            coef1[i][j] = coef[i][j];

    // * POST-PROCESSING FROM THE ORIGINAL MATLAB CVA: project every pixel of
    //   the whole image stack onto the first three canonical variates.
    for (i = 0; i < s1; i++) {
        for (j = 0; j < s2; j++) {
            for (k = 0; k < acindex1; k++) {
                newmatrix1[i][j] = newmatrix1[i][j] + ((double)various_images[k].at<uchar>(i, j)) * coef1[k][0];
                newmatrix2[i][j] = newmatrix2[i][j] + ((double)various_images[k].at<uchar>(i, j)) * coef1[k][1];
                newmatrix3[i][j] = newmatrix3[i][j] + ((double)various_images[k].at<uchar>(i, j)) * coef1[k][2];
            }
        }
    }

    double minimum1, minimum2, minimum3;
    double maximum1, maximum2, maximum3;
    minimum1 = newmatrix1[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix1[i][j] < minimum1)
                minimum1 = newmatrix1[i][j];
    minimum2 = newmatrix2[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix2[i][j] < minimum2)
                minimum2 = newmatrix2[i][j];
    minimum3 = newmatrix3[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix3[i][j] < minimum3)
                minimum3 = newmatrix3[i][j];
    maximum1 = newmatrix1[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix1[i][j] > maximum1)
                maximum1 = newmatrix1[i][j];
    maximum2 = newmatrix2[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix2[i][j] > maximum2)
                maximum2 = newmatrix2[i][j];
    maximum3 = newmatrix3[0][0];
    for (int i = 0; i < s1; i++)
        for (int j = 0; j < s2; j++)
            if (newmatrix3[i][j] > maximum3)
                maximum3 = newmatrix3[i][j];

    // FIX: the original pre-allocated range_cv*/score_cv*/per_cv* buffers
    // and then overwrote the pointers with range_map()'s result, leaking
    // the pre-allocated buffers; the pointers are now simply assigned.
    double **range_cv1 = range_map(newmatrix1, minimum1, maximum1);
    double **range_cv2 = range_map(newmatrix2, minimum2, maximum2);
    double **range_cv3 = range_map(newmatrix3, minimum3, maximum3);

    printf(" \n");
    printf(" A SET OF 8 PICTURES ARE PRODUCED ");
    cv::Mat img16(s1, s2, CV_8UC3);
    // NOTE(review): these PNG parameters are never passed to imwrite() --
    // confirm whether they were meant to be used (the outputs are .tiff).
    vector<int> compression_params;
    compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    compression_params.push_back(9);
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            // FIX: cv::Vec3b has channels 0..2 (B,G,R); the original wrote
            // channel [3], one byte past the pixel, for the third plane.
            img16.at<cv::Vec3b>(i, j)[0] = (255 - range_cv3[i][j]);
            img16.at<cv::Vec3b>(i, j)[1] = (255 - range_cv2[i][j]);
            img16.at<cv::Vec3b>(i, j)[2] = (255 - range_cv1[i][j]);
        }
    }
    char fileword[100];
    strcpy(fileword, namefile);
    strcat(fileword, "_image_rangecvasecond.tiff");
    cv::imwrite(fileword, img16);

    cv::Mat img162(s1, s2, CV_8UC3);
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            img162.at<cv::Vec3b>(i, j)[0] = range_cv3[i][j];
            img162.at<cv::Vec3b>(i, j)[1] = range_cv2[i][j];
            img162.at<cv::Vec3b>(i, j)[2] = range_cv1[i][j];
        }
    }
    char fileword1[100];
    strcpy(fileword1, namefile);
    strcat(fileword1, "_image_rangecva.tiff");
    cv::imwrite(fileword1, img162);

    double minscore1, minscore2, minscore3;
    double maxscore1, maxscore2, maxscore3;
    minscore1 = score1[0][0];
    for (int i = 0; i < sumtotalnrclass; i++)
        if (score1[i][0] < minscore1)
            minscore1 = score1[i][0];
    // FIX: "minscore2==0;" and "minscore3==0;" were comparisons, not
    // assignments (dead expressions); removed, the real initializers follow.
    minscore2 = score1[0][1];
    for (int i = 0; i < sumtotalnrclass; i++)
        if (score1[i][1] < minscore2)
            minscore2 = score1[i][1];
    minscore3 = score1[0][2];
    for (int i = 0; i < sumtotalnrclass; i++)
        if (score1[i][2] < minscore3)
            minscore3 = score1[i][2];
    maxscore1 = score1[0][0];
    for (int i = 0; i < sumtotalnrclass; i++)
        if (score1[i][0] > maxscore1)
            maxscore1 = score1[i][0];
    maxscore2 = score1[0][1];
    for (int i = 0; i < sumtotalnrclass; i++)
        if (score1[i][1] > maxscore2)
            maxscore2 = score1[i][1];
    maxscore3 = score1[0][2];
    for (int i = 0; i < sumtotalnrclass; i++)
        // FIX: the original compared newmatrix3[i][2] while updating from
        // score1[i][2]; both sides now use score1.
        if (score1[i][2] > maxscore3)
            maxscore3 = score1[i][2];

    // Map the whole-image CV planes using the score range.
    double **score_cv1 = range_map(newmatrix1, minscore1, maxscore1);
    double **score_cv2 = range_map(newmatrix2, minscore2, maxscore2);
    double **score_cv3 = range_map(newmatrix3, minscore3, maxscore3);

    cv::Mat img163(s1, s2, CV_8UC3);
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            img163.at<cv::Vec3b>(i, j)[0] = score_cv3[i][j];
            img163.at<cv::Vec3b>(i, j)[1] = score_cv2[i][j];
            img163.at<cv::Vec3b>(i, j)[2] = score_cv1[i][j];
        }
    }
    char fileword2[100];
    strcpy(fileword2, namefile);
    strcat(fileword2, "_image_score_rangecva.tiff");
    cv::imwrite(fileword2, img163);

    cv::Mat img164(s1, s2, CV_8UC3);
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            img164.at<cv::Vec3b>(i, j)[0] = (255 - score_cv3[i][j]);
            img164.at<cv::Vec3b>(i, j)[1] = (255 - score_cv2[i][j]);
            img164.at<cv::Vec3b>(i, j)[2] = (255 - score_cv1[i][j]);
        }
    }
    char fileword3[100];
    strcpy(fileword3, namefile);
    strcat(fileword3, "_image_score_rangecvasecond.tiff");
    cv::imwrite(fileword3, img164);

    // * PRODUCE THE 8-BIT PERCENTILE IMAGES.  Entries 0..3 are the low
    //   percentiles, 4..7 the matching high ones.
    double required_percentiles[8];
    required_percentiles[0] = 0.01;
    required_percentiles[1] = 0.1;
    required_percentiles[2] = 1;
    required_percentiles[3] = 5;
    required_percentiles[4] = 99.99;
    required_percentiles[5] = 99.9;
    required_percentiles[6] = 99;
    required_percentiles[7] = 95;
    int n_percentiles;
    n_percentiles = 8 / 2;
    double *percentiles_cv1 = Percentile(newmatrix1, required_percentiles);
    double *percentiles_cv2 = Percentile(newmatrix2, required_percentiles);
    double *percentiles_cv3 = Percentile(newmatrix3, required_percentiles);

    double **per_cv1 = NULL;
    double **per_cv2 = NULL;
    double **per_cv3 = NULL;
    cv::Mat img165(s1, s2, CV_8UC3);
    // FIX: str22 was 35 bytes; "_image_percentile_99.990000cva.tiff" is 35
    // characters plus the terminator, so sprintf overflowed it.
    char str22[64];
    for (int p = 0; p < n_percentiles; p++) {
        // FIX: each iteration's range_map() results were leaked.
        free_matrix(per_cv1, s1);
        free_matrix(per_cv2, s1);
        free_matrix(per_cv3, s1);
        per_cv1 = range_map(newmatrix1, percentiles_cv1[p], percentiles_cv1[p + n_percentiles]);
        per_cv2 = range_map(newmatrix2, percentiles_cv2[p], percentiles_cv2[p + n_percentiles]);
        per_cv3 = range_map(newmatrix3, percentiles_cv3[p], percentiles_cv3[p + n_percentiles]);
        for (int r = 0; r < s1; r++) {
            for (int c = 0; c < s2; c++) {
                img165.at<cv::Vec3b>(r, c)[0] = per_cv3[r][c];
                img165.at<cv::Vec3b>(r, c)[1] = per_cv2[r][c];
                img165.at<cv::Vec3b>(r, c)[2] = per_cv1[r][c];
            }
        }
        sprintf(str22, "_image_percentile_%lfcva.tiff", required_percentiles[p]);
        strcpy(fileword3, namefile);
        strcat(fileword3, str22);
        cv::imwrite(fileword3, img165);
    }

    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf(" %lf ", cpu_time_used);

    // FIX: release all heap allocations -- this JNI entry point may be
    // invoked repeatedly from Java, and the original leaked everything.
    free_matrix(per_cv1, s1);
    free_matrix(per_cv2, s1);
    free_matrix(per_cv3, s1);
    free_matrix(score_cv1, s1);
    free_matrix(score_cv2, s1);
    free_matrix(score_cv3, s1);
    free_matrix(range_cv1, s1);
    free_matrix(range_cv2, s1);
    free_matrix(range_cv3, s1);
    free_matrix(newmatrix1, s1);
    free_matrix(newmatrix2, s1);
    free_matrix(newmatrix3, s1);
    delete[] percentiles_cv1;
    delete[] percentiles_cv2;
    delete[] percentiles_cv3;
    for (int r = 0; r < sumtotalnrclass; r++)
        delete[] classtotal[r];
    delete[] classtotal;
    delete[] grouping_vector;
    delete[] grp;
    for (int r = 0; r < acindex1; r++) {
        delete[] difference[r];
        delete[] difference1[r];
    }
    delete[] difference;
    delete[] difference1;
    for (int r = 0; r < D; r++) {
        delete[] S[r];
        delete[] B[r];
        delete[] xsumdiff[r];
    }
    delete[] S;
    delete[] B;
    delete[] xsumdiff;
    gsl_matrix_free(AA);
    gsl_matrix_free(SS);
    gsl_matrix_free(evec);
    gsl_vector_free(eval);
    gsl_eigen_gensymmv_free(w);

    return;
}

// Flattens the s1 x s2 matrix `arr` (global image dimensions), sorts it,
// and returns the values at the 8 requested percentiles (caller owns the
// returned new[]'d array of 8 doubles).
double* Percentile(double** arr, double percentile[8])
{
    // FIX: `percentile` decays to a pointer, so sizeof(percentile) was
    // sizeof(double*) -- only coincidentally 8 on LP64.  The caller always
    // passes 8 values; make that explicit.
    const int size1 = 8;
    int index = 0;
    double *resultingvector = new double[s1s2];
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            resultingvector[index] = arr[i][j];
            index = index + 1;
        }
    }
    gsl_vector *v = gsl_vector_alloc(s1s2);
    for (int i = 0; i < s1s2; i++) {
        gsl_vector_set(v, i, resultingvector[i]);
    }
    gsl_sort_vector(v);
    for (int i = 0; i < s1s2; i++) {
        resultingvector[i] = gsl_vector_get(v, i);
    }
    gsl_vector_free(v);          // FIX: was leaked
    int index2;
    double *percentile_values = new double[size1];
    for (int i = 0; i < size1; i++) {
        // FIX: the Matlab-style "1 + p/100*(N-1)" index read one element
        // past the end of the array for the top percentile; use 0-based.
        index2 = (int) round((percentile[i] / 100.0) * (s1s2 - 1));
        percentile_values[i] = resultingvector[index2];
    }
    delete[] resultingvector;    // FIX: was leaked
    return percentile_values;
}

// NOTE(review): signature does not match qsort's comparator
// (int(*)(const void*, const void*)); currently unused in this file.
int compare_doubles (const double * a, const double * b)
{
    if (*a > *b)
        return 1;
    else if (*a < *b)
        return -1;
    else
        return 0;
}

// NOTE(review): quickSort/partition are currently unused here (Percentile
// sorts with gsl_sort_vector); kept for API compatibility.
double* quickSort(double a[], long l, long r)
{
    long j = 0;   // FIX: j was printed uninitialized when l >= r
    if (l < r) {
        // divide and conquer
        j = partition(a, l, r);
        a = quickSort(a, l, j - 1);
        a = quickSort(a, j + 1, r);
    }
    printf(" %ld \n", j);   // FIX: %d with a long argument was undefined
    return a;
}

long partition(double a[], long l, long r)
{
    long i, j;
    double pivot, t;
    pivot = a[l];
    i = l;
    j = r + 1;
    while (1) {
        // FIX: test the bound before reading a[i]; the original evaluated
        // a[i] at i == r + 1 before checking i <= r (out-of-bounds read).
        do ++i; while (i <= r && a[i] <= pivot);
        do --j; while (a[j] > pivot);
        if (i >= j)
            break;
        t = a[i];
        a[i] = a[j];
        a[j] = t;
    }
    t = a[l];
    a[l] = a[j];
    a[j] = t;
    return j;
}

// Linearly maps `newmatrix` (s1 x s2, global dimensions) from the input
// range [minimum, maximum] onto 0..255, clamping values outside the range
// and rounding to the nearest integer.  The caller owns the returned
// matrix (s1 new[]'d rows plus the row-pointer array).
double** range_map(double** newmatrix, double minimum, double maximum)
{
    double **newimage = new double*[s1];
    for (int i = 0; i < s1; i++) {
        newimage[i] = new double[s2];
    }
    double out_low, out_high, out_range_low, out_range_high, v, v2;
    out_low = 0;
    out_high = 255;
    out_range_low = 0;
    out_range_high = 255;
    double in_low, in_high;
    in_low = minimum;
    in_high = maximum;
    for (int i = 0; i < s1; i++) {
        for (int j = 0; j < s2; j++) {
            v = newmatrix[i][j];
            if (v < in_low)
                v2 = out_range_low;
            else if (v > in_high)
                v2 = out_range_high;
            else
                v2 = ((v - in_low) / (in_high - in_low)) * (out_high - out_low) + out_low;
            newimage[i][j] = uint8_t(floor(v2 + 0.5));
        }
    }
    return newimage;
}
GB_binop__copysign_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB (_AaddB__copysign_fp32)
// A.*B function (eWiseMult):  GB (_AemultB_08__copysign_fp32)
// A.*B function (eWiseMult):  GB (_AemultB_02__copysign_fp32)
// A.*B function (eWiseMult):  GB (_AemultB_04__copysign_fp32)
// A.*B function (eWiseMult):  GB (_AemultB_bitmap__copysign_fp32)
// A*D function (colscale):    GB ((none))
// D*A function (rowscale):    GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp32)
// C=scalar+B    GB (_bind1st__copysign_fp32)
// C=scalar+B'   GB (_bind1st_tran__copysign_fp32)
// C=A+scalar    GB (_bind2nd__copysign_fp32)
// C=A'+scalar   GB (_bind2nd_tran__copysign_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = copysignf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z gets the magnitude of x and the sign of y
#define GB_BINOP(z,x,y,i,j) \
    z = copysignf (x, y) ;

// true if the binop must be flipped
// (copysignf is not commutative, so flipped eWiseMult cases must swap x and y;
// see GB (_AemultB_02__copysign_fp32) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COPYSIGN || GxB_NO_FP32 || GxB_NO_COPYSIGN_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__copysign_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__copysign_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__copysign_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__copysign_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are used only for eWiseUnion (entries present in just one
    // of A or B pair with the given scalar instead of being copied through)
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__copysign_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__copysign_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__copysign_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__copysign_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__copysign_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = copysignf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__copysign_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = copysignf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    float aij = GBX (Ax, pA, false) ;       \
    Cx [pC] = copysignf (x, aij) ;          \
}

GrB_Info GB (_bind1st_tran__copysign_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    float aij = GBX (Ax, pA, false) ;       \
    Cx [pC] = copysignf (aij, y) ;          \
}

GrB_Info GB (_bind2nd_tran__copysign_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sptensor.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "sptensor.h"
#include "matrix.h"
#include "sort.h"
#include "io.h"
#include "timer.h"

#include <math.h>


/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/*
 * Return 1 if nonzeros i and j of tt have identical coordinates in every
 * mode, 0 otherwise.  The common 3-mode case is unrolled by hand.
 */
static inline int p_same_coord(
  sptensor_t const * const tt,
  idx_t const i,
  idx_t const j)
{
  idx_t const nmodes = tt->nmodes;

  if(nmodes == 3) {
    return (tt->ind[0][i] == tt->ind[0][j]) &&
           (tt->ind[1][i] == tt->ind[1][j]) &&
           (tt->ind[2][i] == tt->ind[2][j]);
  } else {
    for(idx_t m=0; m < nmodes; ++m) {
      if(tt->ind[m][i] != tt->ind[m][j]) {
        return 0;
      }
    }
    return 1;
  }
}


/**
* @brief Allocate an empty coordinate tensor structure.
*
* @return The allocated structure.
*/
splatt_coord * p_alloc_coord_empty()
{
  splatt_coord * coord = splatt_malloc(sizeof(*coord));

  /* initialize values */
  coord->nmodes = 0;
  for(idx_t m=0; m < MAX_NMODES; ++m) {
    coord->dims[m] = 0;
  }

  coord->nnz = 0;
  coord->vals = NULL;
  for(idx_t m=0; m < MAX_NMODES; ++m) {
    coord->ind[m] = NULL;
    coord->indmap[m] = NULL;
  }

  return coord;
}


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/*
 * Squared Frobenius norm: the sum of the squares of all stored values.
 * (Note: returns the *squared* norm, not its square root.)
 */
val_t tt_normsq(sptensor_t const * const tt)
{
  val_t norm = 0.0;
  val_t const * const restrict tv = tt->vals;
  for(idx_t n=0; n < tt->nnz; ++n) {
    norm += tv[n] * tv[n];
  }
  return norm;
}


/*
 * Fraction of cells that are nonzero: nnz / prod(dims).  Computed as the
 * product of (nnz^(1/nmodes) / dims[m]) over all modes to avoid overflowing
 * the product of the dimensions.
 */
double tt_density(
  sptensor_t const * const tt)
{
  double root = pow((double)tt->nnz, 1./(double)tt->nmodes);
  double density = 1.0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    density *= root / (double)tt->dims[m];
  }
  return density;
}


/*
 * Return a newly allocated, sorted array of the unique index values that
 * appear in mode m, and store its length in *nunique.
 * NOTE(review): the scratch marker array uses plain calloc()/free() while
 * the returned array uses splatt_malloc() — mixing allocators is
 * intentional-looking here but worth confirming against project convention.
 */
idx_t * tt_get_slices(
  sptensor_t const * const tt,
  idx_t const m,
  idx_t * nunique)
{
  /* get maximum number of unique slices */
  idx_t minidx = tt->dims[m];
  idx_t maxidx = 0;

  idx_t const nnz = tt->nnz;
  idx_t const * const inds = tt->ind[m];

  /* find maximum number of uniques */
  for(idx_t n=0; n < nnz; ++n) {
    minidx = SS_MIN(minidx, inds[n]);
    maxidx = SS_MAX(maxidx, inds[n]);
  }
  /* +1 because maxidx is inclusive, not exclusive */
  idx_t const maxrange = 1 + maxidx - minidx;

  /* mark slices which are present and count uniques */
  idx_t * slice_mkrs = calloc(maxrange, sizeof(*slice_mkrs));
  idx_t found = 0;
  for(idx_t n=0; n < nnz; ++n) {
    assert(inds[n] >= minidx);
    idx_t const idx = inds[n] - minidx;
    if(slice_mkrs[idx] == 0) {
      slice_mkrs[idx] = 1;
      ++found;
    }
  }
  *nunique = found;

  /* now copy unique slices */
  idx_t * slices = splatt_malloc(found * sizeof(*slices));
  idx_t ptr = 0;
  for(idx_t i=0; i < maxrange; ++i) {
    if(slice_mkrs[i] == 1) {
      /* translate back to the original (un-shifted) index value */
      slices[ptr++] = i + minidx;
    }
  }
  free(slice_mkrs);

  return slices;
}


/*
 * Histogram of mode-`mode` indices: hist[i] counts the nonzeros whose index
 * in that mode equals i.  Counting is parallel; the atomic protects
 * concurrent increments of the same bucket.
 */
idx_t * tt_get_hist(
  sptensor_t const * const tt,
  idx_t const mode)
{
  idx_t * restrict hist = splatt_malloc(tt->dims[mode] * sizeof(*hist));
  memset(hist, 0, tt->dims[mode] * sizeof(*hist));

  idx_t const * const restrict inds = tt->ind[mode];

  #pragma omp parallel for schedule(static)
  for(idx_t x=0; x < tt->nnz; ++x) {
    #pragma omp atomic
    ++hist[inds[x]];
  }

  return hist;
}


/*
 * Sort the tensor and merge nonzeros with identical coordinates in place.
 * Returns the number of duplicates removed; tt->nnz is updated.
 */
idx_t tt_remove_dups(
  sptensor_t * const tt)
{
  tt_sort(tt, 0, NULL);

  idx_t const nmodes = tt->nmodes;

  idx_t newnnz = 0;
  for(idx_t nnz = 1; nnz < tt->nnz; ++nnz) {
    /* if the two nnz have the same coordinates, merge them by accumulating
     * the value into the kept entry.
     * NOTE(review): an older comment here said "average them", but the code
     * only sums — it never divides by the duplicate count. */
    if(p_same_coord(tt, newnnz, nnz)) {
      tt->vals[newnnz] += tt->vals[nnz];
    } else {
      /* new another nnz */
      ++newnnz;
      for(idx_t m=0; m < nmodes; ++m) {
        tt->ind[m][newnnz] = tt->ind[m][nnz];
      }
      tt->vals[newnnz] = tt->vals[nnz];
    }
  }
  ++newnnz;

  idx_t const diff = tt->nnz - newnnz;
  tt->nnz = newnnz;
  return diff;
}


/*
 * Relabel each mode's indices to a dense 0..(k-1) range, dropping index
 * values that appear in no nonzero.  For every remapped mode, indmap[m]
 * stores the local -> global (original index) translation table; modes that
 * needed no remap get indmap[m] = NULL.  Returns the total number of empty
 * slices removed across all modes.
 */
idx_t tt_remove_empty(
  sptensor_t * const tt)
{
  idx_t dim_sizes[MAX_NMODES];

  idx_t nremoved = 0;

  /* Allocate indmap */
  idx_t const nmodes = tt->nmodes;
  idx_t const nnz = tt->nnz;

  idx_t maxdim = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    maxdim = tt->dims[m] > maxdim ? tt->dims[m] : maxdim;
  }
  /* slice counts; reused for every mode, so sized for the largest one */
  idx_t * scounts = splatt_malloc(maxdim * sizeof(*scounts));

  for(idx_t m=0; m < nmodes; ++m) {
    dim_sizes[m] = 0;
    memset(scounts, 0, maxdim * sizeof(*scounts));

    /* Fill in indmap */
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* keep track of #unique slices */
      if(scounts[tt->ind[m][n]] == 0) {
        scounts[tt->ind[m][n]] = 1;
        ++dim_sizes[m];
      }
    }

    /* move on if no remapping is necessary */
    if(dim_sizes[m] == tt->dims[m]) {
      tt->indmap[m] = NULL;
      continue;
    }

    nremoved += tt->dims[m] - dim_sizes[m];

    /* Now scan to remove empty slices; scounts switches role here from a
     * presence marker (1) to the relabel table (old index -> new index). */
    idx_t ptr = 0;
    for(idx_t i=0; i < tt->dims[m]; ++i) {
      if(scounts[i] == 1) {
        scounts[i] = ptr++;
      }
    }

    tt->indmap[m] = splatt_malloc(dim_sizes[m] * sizeof(**tt->indmap));

    /* relabel all indices in mode m */
    tt->dims[m] = dim_sizes[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      idx_t const global = tt->ind[m][n];
      idx_t const local = scounts[global];
      assert(local < dim_sizes[m]);
      tt->indmap[m][local] = global; /* store local -> global mapping */
      tt->ind[m][n] = local;
    }
  }

  splatt_free(scounts);
  return nremoved;
}



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Load a coordinate tensor from a .tns file (thin wrapper over the io module). */
sptensor_t * tt_read(
  char const * const ifname)
{
  return tt_read_file(ifname);
}


/*
 * XXX remove
 */
/* Legacy alias for splatt_alloc_coord(); kept until callers migrate. */
sptensor_t * tt_alloc(
  idx_t const nnz,
  idx_t const nmodes)
{
  return splatt_alloc_coord(nmodes, nnz);
}


/* Release a tensor and all per-mode index/indmap arrays; NULL-safe. */
void tt_free(
  sptensor_t * tt)
{
  if(tt == NULL) {
    return;
  }
  splatt_free(tt->vals);
  for(idx_t m=0; m < tt->nmodes; ++m) {
    splatt_free(tt->ind[m]);
    splatt_free(tt->indmap[m]);
  }
  splatt_free(tt);
}


/*
 * Mode-`mode` matricization (unfolding) into a CSR sparse matrix.
 * Rows are the `mode` index; each column is the linearization of the
 * remaining modes' indices (last mode fastest).  Sorts tt as a side effect.
 */
spmatrix_t * tt_unfold(
  sptensor_t * const tt,
  idx_t const mode)
{
  idx_t nrows = tt->dims[mode];
  idx_t ncols = 1;

  for(idx_t m=1; m < tt->nmodes; ++m) {
    ncols *= tt->dims[(mode + m) % tt->nmodes];
  }

  /* sort tt */
  tt_sort(tt, mode, NULL);

  /* allocate and fill matrix */
  spmatrix_t * mat = spmat_alloc(nrows, ncols, tt->nnz);
  idx_t * const rowptr = mat->rowptr;
  idx_t * const colind = mat->colind;
  val_t * const mvals  = mat->vals;

  /* make sure to skip ahead to the first non-empty slice */
  idx_t row = 0;
  for(idx_t n=0; n < tt->nnz; ++n) {
    /* increment row and account for possibly empty ones */
    while(row <= tt->ind[mode][n]) {
      rowptr[row++] = n;
    }
    mvals[n] = tt->vals[n];

    /* compute the linearized column id of this nonzero */
    idx_t col = 0;
    idx_t mult = 1;
    for(idx_t m = 0; m < tt->nmodes; ++m) {
      idx_t const off = tt->nmodes - 1 - m;
      if(off == mode) {
        continue;
      }
      col += tt->ind[off][n] * mult;
      mult *= tt->dims[off];
    }
    colind[n] = col;
  }
  /* account for any empty rows at end, too */
  for(idx_t r=row; r <= nrows; ++r) {
    rowptr[r] = tt->nnz;
  }

  return mat;
}


/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/* Allocate a coordinate tensor with room for `nnz` nonzeros in `num_modes`
 * modes; values and indices are left uninitialized, dims are zeroed. */
splatt_coord * splatt_alloc_coord(
    splatt_idx_t const num_modes,
    splatt_idx_t const nnz)
{
  splatt_coord * coord = p_alloc_coord_empty();

  coord->nmodes = num_modes;
  coord->nnz = nnz;

  coord->vals = splatt_malloc(nnz * sizeof(*coord->vals));
  for(idx_t m=0; m < num_modes; ++m) {
    coord->ind[m] = splatt_malloc(nnz * sizeof(**coord->ind));
  }

  /* XXX */
  coord->tiled = SPLATT_NOTILE;

  return coord;
}


/* Release a coordinate tensor allocated with splatt_alloc_coord(); NULL-safe. */
void splatt_free_coord(
    splatt_coord * coord)
{
  if(coord == NULL) {
    return;
  }

  splatt_free(coord->vals);
  for(idx_t m=0; m < MAX_NMODES; ++m) {
    splatt_free(coord->ind[m]);
    splatt_free(coord->indmap[m]);
  }

  splatt_free(coord);
}


/* Stub: not yet implemented — always reports bad input.  (fname and coord
 * are currently unused.) */
splatt_error_type splatt_coord_load(
    char const * const fname,
    splatt_coord * const coord)
{
  /* XXX */
  return SPLATT_ERROR_BADINPUT;
}


/*
 * Accessors
 */

/* Number of stored nonzeros; 0 (with an error message) on NULL input. */
splatt_idx_t splatt_coord_get_nnz(
    splatt_coord const * const coord)
{
  if(coord == NULL) {
    fprintf(stderr, "SPLATT ERROR: splatt_coord_get_nnz() NULL tensor.\n");
    return 0;
  }
  return coord->nnz;
}

/* Number of modes; 0 (with an error message) on NULL input. */
splatt_idx_t splatt_coord_get_modes(
    splatt_coord const * const coord)
{
  if(coord == NULL) {
    fprintf(stderr, "SPLATT ERROR: splatt_coord_get_modes() NULL tensor.\n");
    return 0;
  }
  return coord->nmodes;
}


/* Borrowed pointer to the index array of `mode`; NULL if mode is invalid. */
splatt_idx_t * splatt_coord_get_inds(
    splatt_coord const * const coord,
    splatt_idx_t const mode)
{
  /* sanity check */
  if(mode >= coord->nmodes) {
    fprintf(stderr, "SPLATT ERROR: splatt_coord_get_inds() invalid mode:"
                    " (%"SPLATT_PF_IDX" of only %"SPLATT_PF_IDX" modes).\n",
        mode, coord->nmodes);
    return NULL;
  }

  assert(coord->ind[mode] != NULL);

  return coord->ind[mode];
}


/* Borrowed pointer to the values array; NULL if the tensor is empty. */
splatt_val_t * splatt_coord_get_vals(
    splatt_coord const * const coord)
{
  /* sanity check */
  if(coord->nmodes == 0) {
    fprintf(stderr, "SPLATT ERROR: splatt_coord_get_vals(): empty tensor.\n");
    return NULL;
  }

  assert(coord->vals != NULL);

  return coord->vals;
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(32*t3+Nx+28,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),128*t4+126),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
cpd.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <ParTI.h> #ifdef PARTI_USE_OPENMP #include <omp.h> #endif #include "../src/sptensor/hicoo/hicoo.h" void print_usage(char ** argv) { printf("Usage: %s [options] \n\n", argv[0]); printf("Options: -i INPUT, --input=INPUT (.tns file)\n"); printf(" -o OUTPUT, --output=OUTPUT (output file name)\n"); printf(" -d DEV_ID, --dev-id=DEV_ID (-2:sequential,default; -1:OpenMP parallel)\n"); printf(" -r RANK (CPD rank, 16:default)\n"); printf(" OpenMP options: \n"); printf(" -t NTHREADS, --nt=NT (1:default)\n"); printf(" -u use_reduce, --ur=use_reduce (use privatization or not)\n"); printf(" --help\n"); printf("\n"); } int main(int argc, char ** argv) { FILE *fi = NULL, *fo = NULL; sptSparseTensor X; sptIndex R = 16; sptIndex niters = 5; // 50 double tol = 1e-5; sptKruskalTensor ktensor; int nloops = 5; int dev_id = -2; int nthreads = 1; int use_reduce = 0; if(argc < 2) { print_usage(argv); exit(1); } int c; for(;;) { static struct option long_options[] = { {"input", required_argument, 0, 'i'}, {"output", optional_argument, 0, 'o'}, {"dev-id", optional_argument, 0, 'd'}, {"rank", optional_argument, 0, 'r'}, {"nt", optional_argument, 0, 't'}, {"use-reduce", optional_argument, 0, 'u'}, {"help", no_argument, 0, 0}, {0, 0, 0, 0} }; int option_index = 0; c = getopt_long(argc, argv, 
"i:o:d:r:t:u:", long_options, &option_index); if(c == -1) { break; } switch(c) { case 'i': fi = fopen(optarg, "r"); sptAssert(fi != NULL); printf("input file: %s\n", optarg); fflush(stdout); break; case 'o': fo = fopen(optarg, "w"); sptAssert(fo != NULL); printf("output file: %s\n", optarg); fflush(stdout); break; case 'd': sscanf(optarg, "%d", &dev_id); break; case 'r': sscanf(optarg, "%u"PARTI_SCN_INDEX, &R); break; case 'u': sscanf(optarg, "%d", &use_reduce); break; case 't': sscanf(optarg, "%d", &nthreads); break; case '?': /* invalid option */ case 'h': default: print_usage(argv); exit(1); } } printf("dev_id: %d\n", dev_id); sptAssert(sptLoadSparseTensor(&X, 1, fi) == 0); fclose(fi); sptSparseTensorStatus(&X, stdout); // sptDumpSparseTensor(&X, 0, stdout); sptIndex nmodes = X.nmodes; sptNewKruskalTensor(&ktensor, nmodes, X.ndims, R); /* For warm-up caches, timing not included */ if(dev_id == -2) { nthreads = 1; sptAssert(sptCpdAls(&X, R, niters, tol, &ktensor) == 0); } else if(dev_id == -1) { omp_set_num_threads(nthreads); #pragma omp parallel { nthreads = omp_get_num_threads(); } printf("nthreads: %d\n", nthreads); printf("use_reduce: %d\n", use_reduce); sptAssert(sptOmpCpdAls(&X, R, niters, tol, nthreads, use_reduce, &ktensor) == 0); } if(fo != NULL) { // Dump ktensor to files sptAssert( sptDumpKruskalTensor(&ktensor, fo) == 0 ); fclose(fo); } sptFreeSparseTensor(&X); sptFreeKruskalTensor(&ktensor); return 0; }
2875.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"

/* Array initialization.
 * Fills the four input operands of the 3mm benchmark (which computes
 * G := (A*B)*(C*D)) with deterministic, size-dependent values so that
 * runs are reproducible and the output can be checked. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int i, j;

  /* A is ni x nk. */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i][j] = ((DATA_TYPE) i*j) / ni;
  /* B is nk x nj. */
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
  /* C is nj x nm. */
  for (i = 0; i < nj; i++)
    for (j = 0; j < nm; j++)
      C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
  /* D is nm x nl. */
  for (i = 0; i < nm; i++)
    for (j = 0; j < nl; j++)
      D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Printing to stderr keeps the dump separate from timing output. */
static
void print_array(int ni, int nl,
                 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
      /* Line break every 20 values to keep the dump readable. */
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp target teams distribute thread_limit(32) schedule(dynamic, 28) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp target teams distribute thread_limit(32) schedule(dynamic, 28) for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp target teams distribute thread_limit(32) schedule(dynamic, 28) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
rhs-brisbane.c
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB SP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include <math.h> #include "header-brisbane.h" void compute_rhs() { int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; int gp0, gp1, gp2; gp0 = grid_points[0]; gp1 = grid_points[1]; gp2 = grid_points[2]; //--------------------------------------------------------------------- // compute the reciprocal of density, and the kinetic energy, // and the speed of sound. 
//--------------------------------------------------------------------- //#pragma omp target //present(rho_i,u,qs,square,speed,rhs,forcing,us,vs,ws) //{ /*get the value of rho_i,qs,square,us,vs,ws,speed*/ size_t kernel_compute_rhs_0_off[3] = { 0, 0, 0 }; size_t kernel_compute_rhs_0_idx[3] = { gp0, gp1, gp2 }; brisbane_kernel kernel_compute_rhs_0; brisbane_kernel_create("compute_rhs_0", &kernel_compute_rhs_0); brisbane_kernel_setmem(kernel_compute_rhs_0, 0, mem_u, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_0, 1, mem_rho_i, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_0, 2, mem_us, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_0, 3, mem_vs, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_0, 4, mem_ws, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_0, 5, mem_square, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_0, 6, mem_qs, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_0, 7, mem_speed, brisbane_w); brisbane_kernel_setarg(kernel_compute_rhs_0, 8, sizeof(double), &c1c2); brisbane_task task0; brisbane_task_create(&task0); brisbane_task_kernel(task0, kernel_compute_rhs_0, 3, kernel_compute_rhs_0_off, kernel_compute_rhs_0_idx); brisbane_task_submit(task0, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(rho_inv,aux,i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd private(rho_inv,aux) collapse(3) #endif for (k = 0; k <= gp2-1; k++) { for (j = 0; j <= gp1-1; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(rho_inv,aux) #endif for (i = 0; i <= gp0-1; i++) { rho_inv = 1.0/u[0][k][j][i]; rho_i[k][j][i] = rho_inv; us[k][j][i] = u[1][k][j][i] * rho_inv; vs[k][j][i] = u[2][k][j][i] * rho_inv; ws[k][j][i] = u[3][k][j][i] * rho_inv; square[k][j][i] = 0.5* ( u[1][k][j][i]*u[1][k][j][i] + u[2][k][j][i]*u[2][k][j][i] + u[3][k][j][i]*u[3][k][j][i] ) * rho_inv; qs[k][j][i] = square[k][j][i] * rho_inv; 
//------------------------------------------------------------------- // (don't need speed and ainx until the lhs computation) //------------------------------------------------------------------- aux = c1c2*rho_inv* (u[4][k][j][i] - square[k][j][i]); speed[k][j][i] = sqrt(aux); } } } #endif //--------------------------------------------------------------------- // copy the exact forcing term to the right hand side; because // this forcing term is known, we can store it on the whole grid // including the boundary //--------------------------------------------------------------------- size_t kernel_compute_rhs_1_off[2] = { 0, 0 }; size_t kernel_compute_rhs_1_idx[2] = { gp1, gp2 }; brisbane_kernel kernel_compute_rhs_1; brisbane_kernel_create("compute_rhs_1", &kernel_compute_rhs_1); brisbane_kernel_setmem(kernel_compute_rhs_1, 0, mem_rhs, brisbane_w); brisbane_kernel_setmem(kernel_compute_rhs_1, 1, mem_forcing, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_1, 2, sizeof(int), &gp0); brisbane_task task1; brisbane_task_create(&task1); brisbane_task_kernel(task1, kernel_compute_rhs_1, 2, kernel_compute_rhs_1_off, kernel_compute_rhs_1_idx); brisbane_task_submit(task1, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2) for (k = 0; k <= gp2-1; k++) { for (j = 0; j <= gp1-1; j++) { for (i = 0; i <= gp0-1; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = forcing[m][k][j][i]; } } } } #endif //--------------------------------------------------------------------- // compute xi-direction fluxes //--------------------------------------------------------------------- size_t kernel_compute_rhs_2_off[3] = { 1, 1, 1 }; size_t kernel_compute_rhs_2_idx[3] = { nx2, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_2; brisbane_kernel_create("compute_rhs_2", &kernel_compute_rhs_2); brisbane_kernel_setmem(kernel_compute_rhs_2, 0, mem_us, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 1, mem_rhs, brisbane_rw); 
brisbane_kernel_setmem(kernel_compute_rhs_2, 2, mem_u, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 3, mem_square, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 4, mem_vs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 5, mem_ws, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 6, mem_qs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_2, 7, mem_rho_i, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_2, 8, sizeof(double), &dx1tx1); brisbane_kernel_setarg(kernel_compute_rhs_2, 9, sizeof(double), &dx2tx1); brisbane_kernel_setarg(kernel_compute_rhs_2, 10, sizeof(double), &dx3tx1); brisbane_kernel_setarg(kernel_compute_rhs_2, 11, sizeof(double), &dx4tx1); brisbane_kernel_setarg(kernel_compute_rhs_2, 12, sizeof(double), &dx5tx1); brisbane_kernel_setarg(kernel_compute_rhs_2, 13, sizeof(double), &tx2); brisbane_kernel_setarg(kernel_compute_rhs_2, 14, sizeof(double), &xxcon2); brisbane_kernel_setarg(kernel_compute_rhs_2, 15, sizeof(double), &xxcon3); brisbane_kernel_setarg(kernel_compute_rhs_2, 16, sizeof(double), &xxcon4); brisbane_kernel_setarg(kernel_compute_rhs_2, 17, sizeof(double), &xxcon5); brisbane_kernel_setarg(kernel_compute_rhs_2, 18, sizeof(double), &con43); brisbane_kernel_setarg(kernel_compute_rhs_2, 19, sizeof(double), &c1); brisbane_kernel_setarg(kernel_compute_rhs_2, 20, sizeof(double), &c2); brisbane_task task2; brisbane_task_create(&task2); brisbane_task_kernel(task2, kernel_compute_rhs_2, 3, kernel_compute_rhs_2_off, kernel_compute_rhs_2_idx); brisbane_task_submit(task2, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,uijk,up1,um1) collapse(2) #else #pragma omp target teams distribute parallel for simd private(uijk,up1,um1) collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(uijk,up1,um1) #endif for (i = 1; i <= nx2; i++) { uijk = us[k][j][i]; 
up1 = us[k][j][i+1]; um1 = us[k][j][i-1]; rhs[0][k][j][i] = rhs[0][k][j][i] + dx1tx1 * (u[0][k][j][i+1] - 2.0*u[0][k][j][i] + u[0][k][j][i-1]) - tx2 * (u[1][k][j][i+1] - u[1][k][j][i-1]); rhs[1][k][j][i] = rhs[1][k][j][i] + dx2tx1 * (u[1][k][j][i+1] - 2.0*u[1][k][j][i] + u[1][k][j][i-1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[1][k][j][i+1]*up1 - u[1][k][j][i-1]*um1 + (u[4][k][j][i+1] - square[k][j][i+1] - u[4][k][j][i-1] + square[k][j][i-1]) * c2); rhs[2][k][j][i] = rhs[2][k][j][i] + dx3tx1 * (u[2][k][j][i+1] - 2.0*u[2][k][j][i] + u[2][k][j][i-1]) + xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) - tx2 * (u[2][k][j][i+1]*up1 - u[2][k][j][i-1]*um1); rhs[3][k][j][i] = rhs[3][k][j][i] + dx4tx1 * (u[3][k][j][i+1] - 2.0*u[3][k][j][i] + u[3][k][j][i-1]) + xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) - tx2 * (u[3][k][j][i+1]*up1 - u[3][k][j][i-1]*um1); rhs[4][k][j][i] = rhs[4][k][j][i] + dx5tx1 * (u[4][k][j][i+1] - 2.0*u[4][k][j][i] + u[4][k][j][i-1]) + xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[4][k][j][i+1]*rho_i[k][j][i+1] - 2.0*u[4][k][j][i]*rho_i[k][j][i] + u[4][k][j][i-1]*rho_i[k][j][i-1]) - tx2 * ( (c1*u[4][k][j][i+1] - c2*square[k][j][i+1])*up1 - (c1*u[4][k][j][i-1] - c2*square[k][j][i-1])*um1 ); } } } /*end k*/ #endif //--------------------------------------------------------------------- // add fourth order xi-direction dissipation //--------------------------------------------------------------------- i = 1; size_t kernel_compute_rhs_3_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_3_idx[3] = { 5, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_3; brisbane_kernel_create("compute_rhs_3", &kernel_compute_rhs_3); brisbane_kernel_setmem(kernel_compute_rhs_3, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_3, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_3, 2, sizeof(double), &dssp); 
brisbane_kernel_setarg(kernel_compute_rhs_3, 3, sizeof(int), &i); brisbane_task task3; brisbane_task_create(&task3); brisbane_task_kernel(task3, kernel_compute_rhs_3, 3, kernel_compute_rhs_3_off, kernel_compute_rhs_3_idx); brisbane_task_submit(task3, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(j,k,m) collapse(3) for (k = 1; k <= nz2; k++){ for (j = 1; j <= ny2; j++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i]- dssp * (5.0*u[m][k][j][i] - 4.0*u[m][k][j][i+1] + u[m][k][j][i+2]); } } } #endif i = 2; size_t kernel_compute_rhs_4_off[3] = { 1, 1, 0 }; size_t kernel_compute_rhs_4_idx[3] = { ny2, nz2, 5 }; brisbane_kernel kernel_compute_rhs_4; brisbane_kernel_create("compute_rhs_4", &kernel_compute_rhs_4); brisbane_kernel_setmem(kernel_compute_rhs_4, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_4, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_4, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_4, 3, sizeof(int), &i); brisbane_task task4; brisbane_task_create(&task4); brisbane_task_kernel(task4, kernel_compute_rhs_4, 3, kernel_compute_rhs_4_off, kernel_compute_rhs_4_idx); brisbane_task_submit(task4, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(j,k,m) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (m = 0; m < 5; m++) { for (k = 1; k <= nz2; k++){ #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= ny2; j++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * (-4.0*u[m][k][j][i-1] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j][i+1] + u[m][k][j][i+2]); } } } #endif size_t kernel_compute_rhs_5_off[3] = { 3, 1, 1 }; size_t kernel_compute_rhs_5_idx[3] = { nx2 - 4, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_5; brisbane_kernel_create("compute_rhs_5", &kernel_compute_rhs_5); brisbane_kernel_setmem(kernel_compute_rhs_5, 0, mem_rhs, 
brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_5, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_5, 2, sizeof(double), &dssp); brisbane_task task5; brisbane_task_create(&task5); brisbane_task_kernel(task5, kernel_compute_rhs_5, 3, kernel_compute_rhs_5_off, kernel_compute_rhs_5_idx); brisbane_task_submit(task5, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(4) #endif for (k = 1; k <= nz2; k++){ for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 3; i <= nx2-2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k][j][i-2] - 4.0*u[m][k][j][i-1] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j][i+1] + u[m][k][j][i+2] ); } } } } #endif i = nx2-1; size_t kernel_compute_rhs_6_off[1] = { 1 }; size_t kernel_compute_rhs_6_idx[1] = { nz2 }; brisbane_kernel kernel_compute_rhs_6; brisbane_kernel_create("compute_rhs_6", &kernel_compute_rhs_6); brisbane_kernel_setmem(kernel_compute_rhs_6, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_6, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_6, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_6, 3, sizeof(int), &i); brisbane_task task6; brisbane_task_create(&task6); brisbane_task_kernel(task6, kernel_compute_rhs_6, 1, kernel_compute_rhs_6_off, kernel_compute_rhs_6_idx); brisbane_task_submit(task6, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(j,k,m) for (k = 1; k <= nz2; k++){ for (j = 1; j <= ny2; j++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k][j][i-2] - 4.0*u[m][k][j][i-1] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j][i+1] ); } } } #endif i = nx2; size_t kernel_compute_rhs_7_off[1] = { 1 }; size_t kernel_compute_rhs_7_idx[1] = { nz2 }; brisbane_kernel 
kernel_compute_rhs_7; brisbane_kernel_create("compute_rhs_7", &kernel_compute_rhs_7); brisbane_kernel_setmem(kernel_compute_rhs_7, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_7, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_7, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_7, 3, sizeof(int), &i); brisbane_task task7; brisbane_task_create(&task7); brisbane_task_kernel(task7, kernel_compute_rhs_7, 1, kernel_compute_rhs_7_off, kernel_compute_rhs_7_idx); brisbane_task_submit(task7, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(j,k,m) for (k = 1; k <= nz2; k++){ for (j = 1; j <= ny2; j++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k][j][i-2] - 4.0*u[m][k][j][i-1] + 5.0*u[m][k][j][i] ); } } } #endif //--------------------------------------------------------------------- // compute eta-direction fluxes //--------------------------------------------------------------------- size_t kernel_compute_rhs_8_off[3] = { 1, 1, 1 }; size_t kernel_compute_rhs_8_idx[3] = { nx2, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_8; brisbane_kernel_create("compute_rhs_8", &kernel_compute_rhs_8); brisbane_kernel_setmem(kernel_compute_rhs_8, 0, mem_vs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 1, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_8, 2, mem_u, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 3, mem_us, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 4, mem_square, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 5, mem_ws, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 6, mem_qs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_8, 7, mem_rho_i, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_8, 8, sizeof(double), &dy1ty1); brisbane_kernel_setarg(kernel_compute_rhs_8, 9, sizeof(double), &dy2ty1); brisbane_kernel_setarg(kernel_compute_rhs_8, 10, 
sizeof(double), &dy3ty1); brisbane_kernel_setarg(kernel_compute_rhs_8, 11, sizeof(double), &dy4ty1); brisbane_kernel_setarg(kernel_compute_rhs_8, 12, sizeof(double), &dy5ty1); brisbane_kernel_setarg(kernel_compute_rhs_8, 13, sizeof(double), &ty2); brisbane_kernel_setarg(kernel_compute_rhs_8, 14, sizeof(double), &yycon2); brisbane_kernel_setarg(kernel_compute_rhs_8, 15, sizeof(double), &yycon3); brisbane_kernel_setarg(kernel_compute_rhs_8, 16, sizeof(double), &yycon4); brisbane_kernel_setarg(kernel_compute_rhs_8, 17, sizeof(double), &yycon5); brisbane_kernel_setarg(kernel_compute_rhs_8, 18, sizeof(double), &con43); brisbane_kernel_setarg(kernel_compute_rhs_8, 19, sizeof(double), &c1); brisbane_kernel_setarg(kernel_compute_rhs_8, 20, sizeof(double), &c2); brisbane_task task8; brisbane_task_create(&task8); brisbane_task_kernel(task8, kernel_compute_rhs_8, 3, kernel_compute_rhs_8_off, kernel_compute_rhs_8_idx); brisbane_task_submit(task8, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(vijk,vp1,vm1,i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd private(vijk,vp1,vm1) collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(vijk,vp1,vm1) #endif for (i = 1; i <= nx2; i++) { vijk = vs[k][j][i]; vp1 = vs[k][j+1][i]; vm1 = vs[k][j-1][i]; rhs[0][k][j][i] = rhs[0][k][j][i] + dy1ty1 * (u[0][k][j+1][i] - 2.0*u[0][k][j][i] + u[0][k][j-1][i]) - ty2 * (u[2][k][j+1][i] - u[2][k][j-1][i]); rhs[1][k][j][i] = rhs[1][k][j][i] + dy2ty1 * (u[1][k][j+1][i] - 2.0*u[1][k][j][i] + u[1][k][j-1][i]) + yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) - ty2 * (u[1][k][j+1][i]*vp1 - u[1][k][j-1][i]*vm1); rhs[2][k][j][i] = rhs[2][k][j][i] + dy3ty1 * (u[2][k][j+1][i] - 2.0*u[2][k][j][i] + u[2][k][j-1][i]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[2][k][j+1][i]*vp1 - u[2][k][j-1][i]*vm1 + (u[4][k][j+1][i] - 
square[k][j+1][i] - u[4][k][j-1][i] + square[k][j-1][i]) * c2); rhs[3][k][j][i] = rhs[3][k][j][i] + dy4ty1 * (u[3][k][j+1][i] - 2.0*u[3][k][j][i] + u[3][k][j-1][i]) + yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) - ty2 * (u[3][k][j+1][i]*vp1 - u[3][k][j-1][i]*vm1); rhs[4][k][j][i] = rhs[4][k][j][i] + dy5ty1 * (u[4][k][j+1][i] - 2.0*u[4][k][j][i] + u[4][k][j-1][i]) + yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[4][k][j+1][i]*rho_i[k][j+1][i] - 2.0*u[4][k][j][i]*rho_i[k][j][i] + u[4][k][j-1][i]*rho_i[k][j-1][i]) - ty2 * ((c1*u[4][k][j+1][i] - c2*square[k][j+1][i]) * vp1 - (c1*u[4][k][j-1][i] - c2*square[k][j-1][i]) * vm1); } } } #endif //--------------------------------------------------------------------- // add fourth order eta-direction dissipation //--------------------------------------------------------------------- j = 1; size_t kernel_compute_rhs_9_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_9_idx[3] = { 5, nx2, nz2 }; brisbane_kernel kernel_compute_rhs_9; brisbane_kernel_create("compute_rhs_9", &kernel_compute_rhs_9); brisbane_kernel_setmem(kernel_compute_rhs_9, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_9, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_9, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_9, 3, sizeof(int), &j); brisbane_task task9; brisbane_task_create(&task9); brisbane_task_kernel(task9, kernel_compute_rhs_9, 3, kernel_compute_rhs_9_off, kernel_compute_rhs_9_idx); brisbane_task_submit(task9, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(3) for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i]- dssp * ( 5.0*u[m][k][j][i] - 4.0*u[m][k][j+1][i] + u[m][k][j+2][i]); } } } #endif j = 2; size_t kernel_compute_rhs_10_off[3] = { 0, 1, 1 }; size_t 
kernel_compute_rhs_10_idx[3] = { 5, nx2, nz2 }; brisbane_kernel kernel_compute_rhs_10; brisbane_kernel_create("compute_rhs_10", &kernel_compute_rhs_10); brisbane_kernel_setmem(kernel_compute_rhs_10, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_10, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_10, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_10, 3, sizeof(int), &j); brisbane_task task10; brisbane_task_create(&task10); brisbane_task_kernel(task10, kernel_compute_rhs_10, 3, kernel_compute_rhs_10_off, kernel_compute_rhs_10_idx); brisbane_task_submit(task10, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(3) for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * (-4.0*u[m][k][j-1][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j+1][i] + u[m][k][j+2][i]); } } } #endif size_t kernel_compute_rhs_11_off[3] = { 1, 3, 1 }; size_t kernel_compute_rhs_11_idx[3] = { nx2, ny2-4, nz2 }; brisbane_kernel kernel_compute_rhs_11; brisbane_kernel_create("compute_rhs_11", &kernel_compute_rhs_11); brisbane_kernel_setmem(kernel_compute_rhs_11, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_11, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_11, 2, sizeof(double), &dssp); brisbane_task task11; brisbane_task_create(&task11); brisbane_task_kernel(task11, kernel_compute_rhs_11, 3, kernel_compute_rhs_11_off, kernel_compute_rhs_11_idx); brisbane_task_submit(task11, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(4) #endif for (k = 1; k <= nz2; k++) { for (j = 3; j <= ny2-2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = 
rhs[m][k][j][i] - dssp * ( u[m][k][j-2][i] - 4.0*u[m][k][j-1][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j+1][i] + u[m][k][j+2][i] ); } } } } #endif j = ny2-1; size_t kernel_compute_rhs_12_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_12_idx[3] = { 5, nx2, nz2 }; brisbane_kernel kernel_compute_rhs_12; brisbane_kernel_create("compute_rhs_12", &kernel_compute_rhs_12); brisbane_kernel_setmem(kernel_compute_rhs_12, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_12, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_12, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_12, 3, sizeof(int), &j); brisbane_task task12; brisbane_task_create(&task12); brisbane_task_kernel(task12, kernel_compute_rhs_12, 3, kernel_compute_rhs_12_off, kernel_compute_rhs_12_idx); brisbane_task_submit(task12, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(3) for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k][j-2][i] - 4.0*u[m][k][j-1][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k][j+1][i] ); } } } #endif j = ny2; size_t kernel_compute_rhs_13_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_13_idx[3] = { 5, nx2, nz2 }; brisbane_kernel kernel_compute_rhs_13; brisbane_kernel_create("compute_rhs_13", &kernel_compute_rhs_13); brisbane_kernel_setmem(kernel_compute_rhs_13, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_13, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_13, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_13, 3, sizeof(int), &j); brisbane_task task13; brisbane_task_create(&task13); brisbane_task_kernel(task13, kernel_compute_rhs_13, 3, kernel_compute_rhs_13_off, kernel_compute_rhs_13_idx); brisbane_task_submit(task13, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(3) for (k = 1; k <= 
nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k][j-2][i] - 4.0*u[m][k][j-1][i] + 5.0*u[m][k][j][i] ); } } } #endif //--------------------------------------------------------------------- // compute zeta-direction fluxes //--------------------------------------------------------------------- size_t kernel_compute_rhs_14_off[3] = { 1, 1, 1 }; size_t kernel_compute_rhs_14_idx[3] = { nx2, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_14; brisbane_kernel_create("compute_rhs_14", &kernel_compute_rhs_14); brisbane_kernel_setmem(kernel_compute_rhs_14, 0, mem_ws, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 1, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_14, 2, mem_u, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 3, mem_us, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 4, mem_vs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 5, mem_square, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 6, mem_qs, brisbane_r); brisbane_kernel_setmem(kernel_compute_rhs_14, 7, mem_rho_i, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_14, 8, sizeof(double), &dz1tz1); brisbane_kernel_setarg(kernel_compute_rhs_14, 9, sizeof(double), &dz2tz1); brisbane_kernel_setarg(kernel_compute_rhs_14, 10, sizeof(double), &dz3tz1); brisbane_kernel_setarg(kernel_compute_rhs_14, 11, sizeof(double), &dz4tz1); brisbane_kernel_setarg(kernel_compute_rhs_14, 12, sizeof(double), &dz5tz1); brisbane_kernel_setarg(kernel_compute_rhs_14, 13, sizeof(double), &tz2); brisbane_kernel_setarg(kernel_compute_rhs_14, 14, sizeof(double), &zzcon2); brisbane_kernel_setarg(kernel_compute_rhs_14, 15, sizeof(double), &zzcon3); brisbane_kernel_setarg(kernel_compute_rhs_14, 16, sizeof(double), &zzcon4); brisbane_kernel_setarg(kernel_compute_rhs_14, 17, sizeof(double), &zzcon5); brisbane_kernel_setarg(kernel_compute_rhs_14, 18, sizeof(double), &con43); 
brisbane_kernel_setarg(kernel_compute_rhs_14, 19, sizeof(double), &c1); brisbane_kernel_setarg(kernel_compute_rhs_14, 20, sizeof(double), &c2); brisbane_task task14; brisbane_task_create(&task14); brisbane_task_kernel(task14, kernel_compute_rhs_14, 3, kernel_compute_rhs_14_off, kernel_compute_rhs_14_idx); brisbane_task_submit(task14, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,wijk,wp1,wm1) collapse(2) #else #pragma omp target teams distribute parallel for simd private(wijk,wp1,wm1) collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(wijk,wp1,wm1) #endif for (i = 1; i <= nx2; i++) { wijk = ws[k][j][i]; wp1 = ws[k+1][j][i]; wm1 = ws[k-1][j][i]; rhs[0][k][j][i] = rhs[0][k][j][i] + dz1tz1 * (u[0][k+1][j][i] - 2.0*u[0][k][j][i] + u[0][k-1][j][i]) - tz2 * (u[3][k+1][j][i] - u[3][k-1][j][i]); rhs[1][k][j][i] = rhs[1][k][j][i] + dz2tz1 * (u[1][k+1][j][i] - 2.0*u[1][k][j][i] + u[1][k-1][j][i]) + zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) - tz2 * (u[1][k+1][j][i]*wp1 - u[1][k-1][j][i]*wm1); rhs[2][k][j][i] = rhs[2][k][j][i] + dz3tz1 * (u[2][k+1][j][i] - 2.0*u[2][k][j][i] + u[2][k-1][j][i]) + zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) - tz2 * (u[2][k+1][j][i]*wp1 - u[2][k-1][j][i]*wm1); rhs[3][k][j][i] = rhs[3][k][j][i] + dz4tz1 * (u[3][k+1][j][i] - 2.0*u[3][k][j][i] + u[3][k-1][j][i]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[3][k+1][j][i]*wp1 - u[3][k-1][j][i]*wm1 + (u[4][k+1][j][i] - square[k+1][j][i] - u[4][k-1][j][i] + square[k-1][j][i]) * c2); rhs[4][k][j][i] = rhs[4][k][j][i] + dz5tz1 * (u[4][k+1][j][i] - 2.0*u[4][k][j][i] + u[4][k-1][j][i]) + zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[4][k+1][j][i]*rho_i[k+1][j][i] - 2.0*u[4][k][j][i]*rho_i[k][j][i] + u[4][k-1][j][i]*rho_i[k-1][j][i]) - tz2 * 
((c1*u[4][k+1][j][i] - c2*square[k+1][j][i])*wp1 - (c1*u[4][k-1][j][i] - c2*square[k-1][j][i])*wm1); } } } #endif //--------------------------------------------------------------------- // add fourth order zeta-direction dissipation //--------------------------------------------------------------------- k = 1; size_t kernel_compute_rhs_15_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_15_idx[3] = { 5, nx2, ny2 }; brisbane_kernel kernel_compute_rhs_15; brisbane_kernel_create("compute_rhs_15", &kernel_compute_rhs_15); brisbane_kernel_setmem(kernel_compute_rhs_15, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_15, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_15, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_15, 3, sizeof(int), &k); brisbane_task task15; brisbane_task_create(&task15); brisbane_task_kernel(task15, kernel_compute_rhs_15, 3, kernel_compute_rhs_15_off, kernel_compute_rhs_15_idx); brisbane_task_submit(task15, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(3) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i]- dssp * (5.0*u[m][k][j][i] - 4.0*u[m][k+1][j][i] + u[m][k+2][j][i]); } } } #endif k = 2; size_t kernel_compute_rhs_16_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_16_idx[3] = { 5, nx2, ny2 }; brisbane_kernel kernel_compute_rhs_16; brisbane_kernel_create("compute_rhs_16", &kernel_compute_rhs_16); brisbane_kernel_setmem(kernel_compute_rhs_16, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_16, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_16, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_16, 3, sizeof(int), &k); brisbane_task task16; brisbane_task_create(&task16); brisbane_task_kernel(task16, kernel_compute_rhs_16, 3, kernel_compute_rhs_16_off, kernel_compute_rhs_16_idx); brisbane_task_submit(task16, 
brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(3) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * (-4.0*u[m][k-1][j][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k+1][j][i] + u[m][k+2][j][i]); } } } #endif size_t kernel_compute_rhs_17_off[3] = { 1, 1, 3 }; size_t kernel_compute_rhs_17_idx[3] = { nx2, ny2, nz2-4 }; brisbane_kernel kernel_compute_rhs_17; brisbane_kernel_create("compute_rhs_17", &kernel_compute_rhs_17); brisbane_kernel_setmem(kernel_compute_rhs_17, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_17, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_17, 2, sizeof(double), &dssp); brisbane_task task17; brisbane_task_create(&task17); brisbane_task_kernel(task17, kernel_compute_rhs_17, 3, kernel_compute_rhs_17_off, kernel_compute_rhs_17_idx); brisbane_task_submit(task17, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(4) #endif for (k = 3; k <= nz2-2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k-2][j][i] - 4.0*u[m][k-1][j][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k+1][j][i] + u[m][k+2][j][i] ); } } } } #endif k = nz2-1; size_t kernel_compute_rhs_18_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_18_idx[3] = { 5, nx2, ny2 }; brisbane_kernel kernel_compute_rhs_18; brisbane_kernel_create("compute_rhs_18", &kernel_compute_rhs_18); brisbane_kernel_setmem(kernel_compute_rhs_18, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_18, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_18, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_18, 3, 
sizeof(int), &k); brisbane_task task18; brisbane_task_create(&task18); brisbane_task_kernel(task18, kernel_compute_rhs_18, 3, kernel_compute_rhs_18_off, kernel_compute_rhs_18_idx); brisbane_task_submit(task18, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(3) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k-2][j][i] - 4.0*u[m][k-1][j][i] + 6.0*u[m][k][j][i] - 4.0*u[m][k+1][j][i] ); } } } #endif k = nz2; size_t kernel_compute_rhs_19_off[3] = { 0, 1, 1 }; size_t kernel_compute_rhs_19_idx[3] = { 5, nx2, ny2 }; brisbane_kernel kernel_compute_rhs_19; brisbane_kernel_create("compute_rhs_19", &kernel_compute_rhs_19); brisbane_kernel_setmem(kernel_compute_rhs_19, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_compute_rhs_19, 1, mem_u, brisbane_r); brisbane_kernel_setarg(kernel_compute_rhs_19, 2, sizeof(double), &dssp); brisbane_kernel_setarg(kernel_compute_rhs_19, 3, sizeof(int), &k); brisbane_task task19; brisbane_task_create(&task19); brisbane_task_kernel(task19, kernel_compute_rhs_19, 3, kernel_compute_rhs_19_off, kernel_compute_rhs_19_idx); brisbane_task_submit(task19, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(3) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - dssp * ( u[m][k-2][j][i] - 4.0*u[m][k-1][j][i] + 5.0*u[m][k][j][i] ); } } } #endif size_t kernel_compute_rhs_20_off[3] = { 1, 1, 1 }; size_t kernel_compute_rhs_20_idx[3] = { nx2, ny2, nz2 }; brisbane_kernel kernel_compute_rhs_20; brisbane_kernel_create("compute_rhs_20", &kernel_compute_rhs_20); brisbane_kernel_setmem(kernel_compute_rhs_20, 0, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_compute_rhs_20, 1, sizeof(double), &dt); brisbane_task task20; brisbane_task_create(&task20); brisbane_task_kernel(task20, 
kernel_compute_rhs_20, 3, kernel_compute_rhs_20_off, kernel_compute_rhs_20_idx); brisbane_task_submit(task20, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,k,m) collapse(3) for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] * dt; } } } } #endif //}/* end omp target data*/ }
aarch64_vfabi_WidestDataSize.c
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // REQUIRES: aarch64-registered-target // Note: -fopemp and -fopenmp-simd behavior are expected to be the same. // This test checks the values of Widest Data Size (WDS), as defined // in https://github.com/ARM-software/abi-aa/tree/master/vfabia64 // // WDS is used to check the accepted values <N> of `simdlen(<N>)` when // targeting fixed-length SVE vector function names. The values of // `<N>` that are accepted are such that for X = WDS * <N> * 8, // 128-bit <= X <= 2048-bit and X is a multiple of 128-bit. #pragma omp declare simd simdlen(8) #pragma omp declare simd simdlen(16) #pragma omp declare simd simdlen(256) #pragma omp declare simd simdlen(272) char WDS_is_sizeof_char(char in); // WDS = 1, simdlen(8) and simdlen(272) are not generated. // CHECK-DAG: _ZGVsM16v_WDS_is_sizeof_char // CHECK-DAG: _ZGVsM256v_WDS_is_sizeof_char // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_char #pragma omp declare simd simdlen(4) #pragma omp declare simd simdlen(8) #pragma omp declare simd simdlen(128) #pragma omp declare simd simdlen(136) char WDS_is_sizeof_short(short in); // WDS = 2, simdlen(4) and simdlen(136) are not generated. // CHECK-DAG: _ZGVsM8v_WDS_is_sizeof_short // CHECK-DAG: _ZGVsM128v_WDS_is_sizeof_short // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_short #pragma omp declare simd linear(sin) notinbranch simdlen(2) #pragma omp declare simd linear(sin) notinbranch simdlen(4) #pragma omp declare simd linear(sin) notinbranch simdlen(64) #pragma omp declare simd linear(sin) notinbranch simdlen(68) void WDS_is_sizeof_float_pointee(float in, float *sin); // WDS = 4, simdlen(2) and simdlen(68) are not generated. 
// CHECK-DAG: _ZGVsM4vl4_WDS_is_sizeof_float_pointee // CHECK-DAG: _ZGVsM64vl4_WDS_is_sizeof_float_pointee // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_float_pointee #pragma omp declare simd linear(sin) notinbranch simdlen(2) #pragma omp declare simd linear(sin) notinbranch simdlen(4) #pragma omp declare simd linear(sin) notinbranch simdlen(32) #pragma omp declare simd linear(sin) notinbranch simdlen(34) void WDS_is_sizeof_double_pointee(float in, double *sin); // WDS = 8 because of the linear clause, simdlen(34) is not generated. // CHECK-DAG: _ZGVsM2vl8_WDS_is_sizeof_double_pointee // CHECK-DAG: _ZGVsM4vl8_WDS_is_sizeof_double_pointee // CHECK-DAG: _ZGVsM32vl8_WDS_is_sizeof_double_pointee // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_double_pointee #pragma omp declare simd simdlen(2) #pragma omp declare simd simdlen(4) #pragma omp declare simd simdlen(32) #pragma omp declare simd simdlen(34) double WDS_is_sizeof_double(double in); // WDS = 8, simdlen(34) is not generated. // CHECK-DAG: _ZGVsM2v_WDS_is_sizeof_double // CHECK-DAG: _ZGVsM4v_WDS_is_sizeof_double // CHECK-DAG: _ZGVsM32v_WDS_is_sizeof_double // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_double static char C; static short S; static float F; static double D; void do_something() { C = WDS_is_sizeof_char(C); C = WDS_is_sizeof_short(S); WDS_is_sizeof_float_pointee(F, &F); WDS_is_sizeof_double_pointee(F, &D); D = WDS_is_sizeof_double(D); }
GB_unop__identity_uint16_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fp32) // op(A') function: GB (_unop_tran__identity_uint16_fp32) // C type: uint16_t // A type: float // cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fp32) ( uint16_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // 
A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
backprop.c
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> //#define OPEN #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ {\ register char *_to,*_from;\ register int _i,_l;\ _to = (char *)(to);\ _from = (char *)(from);\ _l = (len);\ for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float) rand() / (float) BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(x) float x; { float m; //x = -x; //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120; //return(1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(n) int n; { float *new; new = (float *) malloc ((unsigned) (n * sizeof (float))); if (new == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new); } /*** Allocate 2d array of floats ***/ float **alloc_2d_dbl(m, n) int m, n; { int i; float **new; new = (float **) malloc ((unsigned) (m * sizeof (float *))); if (new == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { new[i] = alloc_1d_dbl(n); } return (new); } bpnn_randomize_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float) rand()/RAND_MAX; // w[i][j] = dpn1(); } } } bpnn_randomize_row(w, m) float *w; int m; { int i; for (i = 0; i <= m; i++) { //w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } bpnn_zero_weights(w, m, n) float **w; int m, n; { int i, j; 
for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } void bpnn_initialize(seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN *bpnn_internal_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(net) BPNN *net; { int n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); for (i = 0; i <= n1; i++) { free((char *) net->input_weights[i]); free((char *) net->input_prev_weights[i]); } free((char *) net->input_weights); free((char *) net->input_prev_weights); for (i = 0; i <= n2; i++) { free((char *) net->hidden_weights[i]); free((char *) net->hidden_prev_weights[i]); } free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. 
Space is also allocated for temporary storage (momentum weights, error computations, etc). ***/ BPNN *bpnn_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(l1, l2, conn, n1, n2) float *l1, *l2, **conn; int n1, n2; { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } //extern "C" void bpnn_output_error(delta, target, output, nj, err) float *delta, *target, *output, *err; int nj; { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(delta_h, nh, delta_o, no, who, hidden, err) float *delta_h, *delta_o, *hidden, **who, *err; int nh, no; { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw) float *delta, *ly, **w, **oldw; { float new_dw; int k, j; ly[0] = 1.0; //eta = 0.3; //momentum = 0.3; #ifdef OPEN 
omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } void bpnn_feedforward(net) BPNN *net; { int in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(net, eo, eh) BPNN *net; float *eo, *eh; { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. ***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. 
***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(net, filename) BPNN *net; char *filename; { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add// FILE *pFile; pFile = fopen( filename, "w+" ); /////// /* if ((fd = creat(filename, 0644)) == -1) { printf("BPNN_SAVE: Cannot create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *) &n1, sizeof(int)); //write(fd, (char *) &n2, sizeof(int)); //write(fd, (char *) &n3, sizeof(int)); fwrite( (char *) &n1 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n2 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n3 , sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1+1) * (n2+1) * sizeof(float)); fwrite( mem , (unsigned)(sizeof(float)), (unsigned) ((n1+1) * (n2+1) * sizeof(float)) , pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2+1) * (n3+1) * sizeof(float)); fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1) * sizeof(float)) , pFile); free(mem); fclose(pFile); return; } BPNN *bpnn_read(filename) char *filename; { char *mem; BPNN *new; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } printf("Reading '%s'\n", 
filename); //fflush(stdout); read(fd, (char *) &n1, sizeof(int)); read(fd, (char *) &n2, sizeof(int)); read(fd, (char *) &n3, sizeof(int)); new = bpnn_internal_create(n1, n2, n3); printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3); printf("Reading input weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); read(fd, mem, (n1+1) * (n2+1) * sizeof(float)); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); printf("Done\nReading hidden weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); read(fd, mem, (n2+1) * (n3+1) * sizeof(float)); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); close(fd); printf("Done\n"); //fflush(stdout); bpnn_zero_weights(new->input_prev_weights, n1, n2); bpnn_zero_weights(new->hidden_prev_weights, n2, n3); return (new); }
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! 
\brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { const double alpha = nnvm::get<double>(attrs.parsed); CHECK_EQ(output.shape(), input.shape()); const int64_t row_count = output.shape()[0]; const int64_t items_per_row = output.shape().Size() / row_count; const DType result_for_zero = OP::Map(DType(0), DType(alpha)); mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream); mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream); const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size(); if (sparse_row_count != row_count) { mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data( rowsparse::kIdx).FlatTo1D<cpu, IType>(stream); int64_t input_iter = 0; int64_t output_row = 0; IType next_input_row = 0; while (output_row < row_count) { next_input_row = input_iter < sparse_row_count ? 
int64_t(row_indexes[input_iter]) : row_count; // Split up into blocks of contiguous data and do those together // Do contiguous dense blocks const int64_t dense_block_count = next_input_row - output_row; if (dense_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch( stream, items_per_row * dense_block_count, output_data.dptr_ + items_per_row * output_row, result_for_zero); }); output_row += dense_block_count; continue; } // Do contiguous sparse blocks int64_t next_non_contiguous_sparse = input_iter; while (next_non_contiguous_sparse < sparse_row_count - 1) { if (row_indexes[next_non_contiguous_sparse + 1] != row_indexes[next_non_contiguous_sparse] + 1) { break; } ++next_non_contiguous_sparse; } const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1; if (sparse_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * sparse_block_count, &output_data.dptr_[items_per_row * output_row], &input_data.dptr_[items_per_row * input_iter], DType(alpha)); }); output_row += sparse_block_count; input_iter += sparse_block_count; continue; } } } else { // All rows exist (eventually we don't have to do complex // things to call GPU kernels because we don't need to access row indices) MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha)); }); } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } /*! 
\brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { CHECK_EQ(output.shape(), input.shape()); const double alpha = nnvm::get<double>(attrs.parsed); const DType dense_fill_val = OP::Map(DType(0), DType(alpha)); const TBlob column_indexes = input.aux_data(csr::kIdx); const size_t item_count = column_indexes.Size(); // Pre-fill dense with 0-input/output value FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>()); mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data()); if (item_count) { const DType *in = input.data().dptr<DType>(); const IType *column_indexes_ptr = column_indexes.dptr<IType>(); const auto row_count = static_cast<size_t>(input.shape()[0]); const TBlob row_starts = input.aux_data(csr::kIndPtr); const CType *row_starts_ptr = row_starts.dptr<CType>(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(row_count); ++i) { const bool last_row = i == static_cast<int>(row_count) - 1; // Split up into blocks of contiguous data and do those together const size_t row_item_start_iter = row_starts_ptr[i]; const size_t input_items_this_row = !last_row ? 
static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter : item_count - row_item_start_iter; if (input_items_this_row) { const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter; const DType *row_data_start = in + row_item_start_iter; DType *output_this_row = out[i].dptr_; // More overhead to use OMP for small loops, so don't if (input_items_this_row > 1000) { #pragma omp parallel for for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } else { for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } } } } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } template<typename xpu, typename OP, typename DType, typename IType> static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray output) { mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>(); CHECK_EQ(output.storage_type(), kDefaultStorage); switch (input.storage_type()) { case kRowSparseStorage: { ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output); break; } case kCSRStorage: { MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, { ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output); }); break; } default: CHECK(false) << "Unsupported sparse storage type"; break; } } public: template<typename xpu, typename OP> 
static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha)); }); }); } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else if (out_stype == kDefaultStorage && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]); }); }); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename OP> static void LogicComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = 
inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename OP> static void Backward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req< mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>:: Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha)); }); }); } }; #define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr_parser([](NodeAttrs* attrs) { \ attrs->parsed = std::stod(attrs->dict["scalar"]); \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .add_argument("data", "NDArray-or-Symbol", "source input") \ .add_argument("scalar", "float", "scalar input") } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
timeTest.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <mpi.h> #include <omp.h> #include <immintrin.h> void print_vec(int* vec, int l) { for (int i = 0; i < l; i++) printf("%d ", vec[i]), fflush(stdout); printf("\n"); fflush(stdout); } void sum_AVX(int* vec1, int* vec2, int* vec3, int l) { for (int i = 0; i < l / 8; i++) { __m256i piece1 = _mm256_load_si256((const __m256i*)(vec1 + i * 8)); __m256i piece2 = _mm256_load_si256((const __m256i*)(vec2 + i * 8)); __m256i pieceSum = _mm256_add_epi32(piece1, piece2); _mm256_store_si256((__m256i*)(vec3 + i * 8), pieceSum); } for (int i = (l / 8) * 8; i < l; i++) vec3[i] = vec1[i] + vec2[i]; } void sum_base(int* vec1, int* vec2, int* vec3, int l) { for (int i = 0; i < l; i++) vec3[i] = vec1[i] + vec2[i]; } void vecSum_base(int* vec1_p, int* vec2_p, int* vec3_p, int l) { double startTime = MPI_Wtime(); sum_base(vec1_p, vec2_p, vec3_p, l); double endTime = MPI_Wtime(); printf("Base test completed in %lf\n", endTime - startTime); fflush(stdout); //print_vec(vec3_p, l); return; } void vecSum_AVX(int* vec1_p, int* vec2_p, int* vec3_p, int l) { double startTime = MPI_Wtime(); sum_AVX(vec1_p, vec2_p, vec3_p, l); double endTime = MPI_Wtime(); printf("AVX test completed in %lf\n", endTime - startTime); fflush(stdout); //print_vec(vec3_p, l); return; } void vecSum_MPI(int* vec1, int* vec2, int* vec3, int l, void (*sum_p) (int*, int*, int*, int)) { int myRank, commSize; MPI_Comm_rank(MPI_COMM_WORLD, &myRank); MPI_Comm_size(MPI_COMM_WORLD, &commSize); int* sendCount = (int*)malloc(commSize * sizeof(int)); int* displace = (int*)malloc(commSize * sizeof(int)); int localCount; // location information for each vector. int* vec1_p, * vec2_p, * vec3_p; if (!myRank) { vec1_p = vec1; vec2_p = vec2; vec3_p = vec3; // the master process get vector from function parameters. for (int i = 0; i < commSize; i++) sendCount[i] = l / commSize + (i < l % commSize); for (int i = 0; i < commSize; i++) displace[i] = i ? 
displace[i - 1] + sendCount[i - 1] : 0; // set parameters for scattering information. MPI_Scatter(sendCount, 1, MPI_INT, &localCount, 1, MPI_INT, 0, MPI_COMM_WORLD); // distribute local length to all members. } else { MPI_Scatter(sendCount, 1, MPI_INT, &localCount, 1, MPI_INT, 0, MPI_COMM_WORLD); vec1_p = (int*)malloc(localCount * sizeof(int)); vec2_p = (int*)malloc(localCount * sizeof(int)); vec3_p = (int*)malloc(localCount * sizeof(int)); if (vec1_p == NULL || vec2_p == NULL || vec3_p == NULL) { printf("Memory allocation failed.\n"); fflush(stdout); return; } // allocate memory for each vector. } if (!myRank) { MPI_Scatterv(vec1_p, sendCount, displace, MPI_INT, MPI_IN_PLACE, localCount, MPI_INT, 0, MPI_COMM_WORLD); MPI_Scatterv(vec2_p, sendCount, displace, MPI_INT, MPI_IN_PLACE, localCount, MPI_INT, 0, MPI_COMM_WORLD); } else { MPI_Scatterv(vec1_p, sendCount, displace, MPI_INT, vec1_p, localCount, MPI_INT, 0, MPI_COMM_WORLD); MPI_Scatterv(vec2_p, sendCount, displace, MPI_INT, vec2_p, localCount, MPI_INT, 0, MPI_COMM_WORLD); } // distribute two source vectors to other processes. sum_p(vec1_p, vec2_p, vec3_p, localCount); // add up part of vectors for every process. if (!myRank) { MPI_Gatherv(MPI_IN_PLACE, localCount, MPI_INT, vec3_p, sendCount, displace, MPI_INT, 0, MPI_COMM_WORLD); // print_vec(vec3_p, l); } else { MPI_Gatherv(vec3_p, localCount, MPI_INT, vec3_p, sendCount, displace, MPI_INT, 0, MPI_COMM_WORLD); } // collect sub-vectors from all processes. 
free(sendCount); free(displace); return; } void vecSum_openMP(int* vec1, int* vec2, int* vec3, int l, void (*sum_p) (int*, int*, int*, int)) { int threadNum = 1; #ifdef _OPENMP threadNum = 8; #endif double startTime = omp_get_wtime(); int i; if (sum_p == sum_base) { #pragma omp parallel for num_threads(threadNum) \ default(none) shared(vec1, vec2, vec3, l) private(i) for (i = 0; i < l; i++) vec3[i] = vec1[i] + vec2[i]; } else { #pragma omp parallel for num_threads(threadNum) \ default(none) shared(vec1, vec2, vec3, l) private(i) for (i = 0; i < l / 8; i++) { __m256i piece1 = _mm256_load_si256((const __m256i*)(vec1 + i * 8)); __m256i piece2 = _mm256_load_si256((const __m256i*)(vec2 + i * 8)); __m256i pieceSum = _mm256_add_epi32(piece1, piece2); _mm256_store_si256((__m256i*)(vec3 + i * 8), pieceSum); } #pragma omp parallel for num_threads(threadNum) \ default(none) shared(vec1, vec2, vec3, l) private(i) for (i = (l / 8) * 8; i < l; i++) vec3[i] = vec1[i] + vec2[i]; } double endTime = omp_get_wtime(); //print_vec(vec3, l); if (sum_p == sum_base) printf("openMP test completed in: %lf\n", endTime - startTime); else printf("openMP+AVX test completed in: %lf\n", endTime - startTime); return 0; } int main() { MPI_Init(NULL, NULL); int myRank, commSize; MPI_Comm_rank(MPI_COMM_WORLD, &myRank); MPI_Comm_size(MPI_COMM_WORLD, &commSize); int* vec1_p, * vec2_p, * vec3_p; int vecLength; // location information for each vector. double startTime, endTime; if (!myRank) { printf("Input length of the summed vectors:"); fflush(stdout); scanf("%d", &vecLength); // enter total length of the vectors. vec1_p = (int*)malloc(vecLength * sizeof(int)); vec2_p = (int*)malloc(vecLength * sizeof(int)); vec3_p = (int*)malloc(vecLength * sizeof(int)); if (vec1_p == NULL || vec2_p == NULL || vec3_p == NULL) { printf("Meomry allocation failed.\n"); fflush(stdout); MPI_Finalize(); return 0; } // allocate memory for each vector. 
srand((unsigned int)time(NULL)); for (int i = 0; i < vecLength; i++) { vec1_p[i] = rand(); vec2_p[i] = rand(); } // get random numbers in the vectors. // print_vec(vec1_p, vecLength); print_vec(vec2_p, vecLength); vecSum_base(vec1_p, vec2_p, vec3_p, vecLength); vecSum_AVX(vec1_p, vec2_p, vec3_p, vecLength); vecSum_openMP(vec1_p, vec2_p, vec3_p, vecLength, sum_base); vecSum_openMP(vec1_p, vec2_p, vec3_p, vecLength, sum_AVX); // let process 0 complete these single-process implementation. MPI_Barrier(MPI_COMM_WORLD); startTime = MPI_Wtime(); vecSum_MPI(vec1_p, vec2_p, vec3_p, vecLength, sum_base); // test using MPI multiprocessing. endTime = MPI_Wtime(); printf("MPI test completed in: %lf\n", endTime - startTime); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD); startTime = MPI_Wtime(); vecSum_MPI(vec1_p, vec2_p, vec3_p, vecLength, sum_AVX); // test using MPI as well as AVX parallelism. endTime = MPI_Wtime(); printf("MPI+AVX test completed in: %lf\n", endTime - startTime); fflush(stdout); free(vec1_p); free(vec2_p); free(vec3_p); } else { MPI_Barrier(MPI_COMM_WORLD); vecSum_MPI(NULL, NULL, NULL, 0, sum_base); // slave routine. Only receive parameters from master. MPI_Barrier(MPI_COMM_WORLD); vecSum_MPI(NULL, NULL, NULL, 0, sum_AVX); } MPI_Finalize(); return 0; }
convolution_sgemm_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static inline signed char float2int8(float v) { int int32 = round(v); if (int32 > 127) return 127; if (int32 < -128) return -128; return (signed char)int32; } static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; // im2row Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { for (int p=0; p<inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u=0; u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, 
opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i+1); const signed char* img2 = bottom_im2row.row<signed char>(i+2); const signed char* img3 = bottom_im2row.row<signed char>(i+3); signed char* tmpptr = bottom_tm.channel(i/4); int q = 0; for (; q+1<inch*kernel_size; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i/4 + i%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p+0)*inch*kernel_size; const signed char* k1 = kernel + (p+1)*inch*kernel_size; const signed char* k2 = kernel + (p+2)*inch*kernel_size; const signed char* k3 = kernel + (p+3)*inch*kernel_size; signed char* ktmp = kernel_tm.channel(p/4); int q=0; for (; 
q+1<inch*kernel_size; q+=2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch*kernel_size; signed char* ktmp = kernel_tm.channel(p/4 + p%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; int* output0 = top_blob.channel(i); int* output1 = top_blob.channel(i+1); int* output2 = top_blob.channel(i+2); int* output3 = top_blob.channel(i+3); int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[2*n]; // k0 sum0[n] += (int)va[1] * vb[2*n+1]; sum1[n] += (int)va[2] * vb[2*n]; // k1 sum1[n] += (int)va[3] * vb[2*n+1]; sum2[n] += (int)va[4] * vb[2*n]; // k2 sum2[n] += (int)va[5] * vb[2*n+1]; sum3[n] += (int)va[6] * vb[2*n]; // k3 sum3[n] += (int)va[7] * vb[2*n+1]; } va += 8; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += 
(int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n=0; n<4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j<N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4); int k=0; for (; k+1<K; k=k+2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k<K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { int* output = top_blob.channel(i); int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4 + i%4); int sum[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[2*n]; sum[n] += (int)va[1] * vb[2*n+1]; } va += 2; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n=0; n<4; n++) { output[n] = sum[n]; } output += 4; } for (; j<N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4 + i%4); for (int k=0; k<K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed 
char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_dequant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { for (int p=0; p<inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u=0; u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i+1); const signed char* img2 = bottom_im2row.row<signed char>(i+2); const signed char* img3 = bottom_im2row.row<signed char>(i+3); signed char* tmpptr = bottom_tm.channel(i/4); int q = 0; for (; q+1<inch*kernel_size; q=q+2) { 
tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i/4 + i%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p+0)*inch*kernel_size; const signed char* k1 = kernel + (p+1)*inch*kernel_size; const signed char* k2 = kernel + (p+2)*inch*kernel_size; const signed char* k3 = kernel + (p+3)*inch*kernel_size; signed char* ktmp = kernel_tm.channel(p/4); int q=0; for (; q+1<inch*kernel_size; q+=2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch*kernel_size; signed char* ktmp = 
kernel_tm.channel(p/4 + p%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i+1] : 0.f; const float bias2 = bias ? bias[i+2] : 0.f; const float bias3 = bias ? bias[i+3] : 0.f; const float scale_dequant0 = scale_dequant[i]; const float scale_dequant1 = scale_dequant[i+1]; const float scale_dequant2 = scale_dequant[i+2]; const float scale_dequant3 = scale_dequant[i+3]; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[2*n]; // k0 sum0[n] += (int)va[1] * vb[2*n+1]; sum1[n] += (int)va[2] * vb[2*n]; // k1 sum1[n] += (int)va[3] * vb[2*n+1]; sum2[n] += (int)va[4] * vb[2*n]; // k2 sum2[n] += (int)va[5] * vb[2*n+1]; sum3[n] += (int)va[6] * vb[2*n]; // k3 sum3[n] += (int)va[7] * vb[2*n+1]; } va += 8; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n=0; n<4; n++) { output0[n] = (float)sum0[n] * scale_dequant0 + bias0; output1[n] = 
(float)sum1[n] * scale_dequant1 + bias1; output2[n] = (float)sum2[n] * scale_dequant2 + bias2; output3[n] = (float)sum3[n] * scale_dequant3 + bias3; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j<N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4); int k=0; for (; k+1<K; k=k+2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k<K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = (float)sum0 * scale_dequant0 + bias0; output1[0] = (float)sum1 * scale_dequant1 + bias1; output2[0] = (float)sum2 * scale_dequant2 + bias2; output3[0] = (float)sum3 * scale_dequant3 + bias3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; const float scale_dequant0 = scale_dequant[i]; int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4 + i%4); int sum[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[2*n]; sum[n] += (int)va[1] * vb[2*n+1]; } va += 2; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n=0; n<4; n++) { output[n] = (float)sum[n] * scale_dequant0 + bias0; } output += 4; } for (; j<N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4 + i%4); for (int k=0; k<K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = (float)sum * scale_dequant0 + bias0; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_requant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { for (int p=0; p<inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u=0; 
u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i+1); const signed char* img2 = bottom_im2row.row<signed char>(i+2); const signed char* img3 = bottom_im2row.row<signed char>(i+3); signed char* tmpptr = bottom_tm.channel(i/4); int q = 0; for (; q+1<inch*kernel_size; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i/4 + i%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int 
remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p+0)*inch*kernel_size; const signed char* k1 = kernel + (p+1)*inch*kernel_size; const signed char* k2 = kernel + (p+2)*inch*kernel_size; const signed char* k3 = kernel + (p+3)*inch*kernel_size; signed char* ktmp = kernel_tm.channel(p/4); int q=0; for (; q+1<inch*kernel_size; q+=2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch*kernel_size; signed char* ktmp = kernel_tm.channel(p/4 + p%4); int q=0; for (; q+1<inch*kernel_size; q=q+2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; signed char* output0 = top_blob.channel(i); signed char* output1 = top_blob.channel(i+1); signed char* output2 = top_blob.channel(i+2); signed char* output3 = top_blob.channel(i+3); const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i+1] : 0.f; const float bias2 = bias ? bias[i+2] : 0.f; const float bias3 = bias ? 
bias[i+3] : 0.f; const float scale_requant_in0 = scale_requant[2*i]; const float scale_requant_out0 = scale_requant[2*i+1]; const float scale_requant_in1 = scale_requant[2*(i+1)]; const float scale_requant_out1 = scale_requant[2*(i+1)+1]; const float scale_requant_in2 = scale_requant[2*(i+2)]; const float scale_requant_out2 = scale_requant[2*(i+2)+1]; const float scale_requant_in3 = scale_requant[2*(i+3)]; const float scale_requant_out3 = scale_requant[2*(i+3)+1]; int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[2*n]; // k0 sum0[n] += (int)va[1] * vb[2*n+1]; sum1[n] += (int)va[2] * vb[2*n]; // k1 sum1[n] += (int)va[3] * vb[2*n+1]; sum2[n] += (int)va[4] * vb[2*n]; // k2 sum2[n] += (int)va[5] * vb[2*n+1]; sum3[n] += (int)va[6] * vb[2*n]; // k3 sum3[n] += (int)va[7] * vb[2*n+1]; } va += 8; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n=0; n<4; n++) { output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0); output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1); output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2); output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3); } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j<N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4); int k=0; for (; k+1<K; k=k+2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += 
(int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k<K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { signed char* output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0.f; const float scale_requant_in0 = scale_requant[2*i]; const float scale_requant_out0 = scale_requant[2*i+1]; int j=0; for (; j+3<N; j=j+4) { signed char* vb = bottom_tm.channel(j/4); signed char* va = kernel_tm.channel(i/4 + i%4); int sum[4] = {0}; int k=0; for (; k+1<K; k=k+2) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[2*n]; sum[n] += (int)va[1] * vb[2*n+1]; } va += 2; vb += 8; } for (; k<K; k++) { for (int n=0; n<4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n=0; n<4; n++) { output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0); } output += 4; } for (; j<N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j/4 + j%4); signed char* va = kernel_tm.channel(i/4 + i%4); for (int k=0; k<K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0); output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = 
(signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } }
openmp_atoms.c
/* Suggested makefile: ---------- P=openmp_atoms CFLAGS=-g -Wall -std=gnu99 -O3 -fopenmp $(P): ---------- */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> //memset #include "openmp_getmax.c" int main(){ long int max = 1e7; int *factor_ct = malloc(sizeof(int)*max); factor_ct[0] = 0; factor_ct[1] = 1; for (long int i=2; i< max; i++) factor_ct[i] = 2; #pragma omp parallel for for (long int i=2; i<= max/2; i++) for (long int scale=2; scale*i < max; scale++) { #pragma omp atomic update factor_ct[scale*i]++; } int max_factors = get_max(factor_ct, max); long int tally[max_factors+1]; memset(tally, 0, sizeof(long int)*(max_factors+1)); #pragma omp parallel for for (long int i=0; i< max; i++){ #pragma omp atomic update tally[factor_ct[i]]++; } for (int i=0; i<=max_factors; i++) printf("%i\t%li\n", i, tally[i]); }
mxnet_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2017 by Contributors
 * \file mxnet_op.h
 * \brief Generic kernel launch helpers (CPU/OpenMP and CUDA), shape/index
 *        arithmetic, and operator-request dispatch macros.
 * \author Junyuan Xie
 */
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_

#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"

#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif  // __CUDACC__

namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;

// PI lives in constant memory when compiled for device, in ordinary host
// memory otherwise.
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

// Declared here, specialized per device below (gpu version only under CUDA).
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
// Grid-stride loop boilerplate for CUDA kernels.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// Query properties of the currently selected CUDA device.
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}

template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}

/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
  case kNullOp:                                     \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const OpReqType ReqType = kWriteTo;           \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const OpReqType ReqType = kAddTo;             \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }

/*! \brief Turn a runtime ndim (1..5) into a compile-time constant so the
 *         body can instantiate templated code per dimensionality. */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)          \
  if (NDim == 0) {                                  \
  } else if (NDim == 1) {                           \
    const int ndim = 1;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 2) {                           \
    const int ndim = 2;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 3) {                           \
    const int ndim = 3;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 4) {                           \
    const int ndim = 4;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 5) {                           \
    const int ndim = 5;                             \
    {__VA_ARGS__}                                   \
  } else {                                          \
    LOG(FATAL) << "ndim=" << NDim << "too large ";  \
  }

/*! \brief Type switch that rejects int8/uint8 with a fatal error. */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) {                                   \
  case mshadow::kFloat32:                           \
    {                                               \
      typedef float DType;                          \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kFloat64:                           \
    {                                               \
      typedef double DType;                         \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kFloat16:                           \
    {                                               \
      typedef mshadow::half::half_t DType;          \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kUint8:                             \
    LOG(FATAL) << "This operation does not "        \
                  "support int8 or uint8";          \
    break;                                          \
  case mshadow::kInt8:                              \
    LOG(FATAL) << "This operation does not "        \
                  "support int8 or uint8";          \
    break;                                          \
  case mshadow::kInt32:                             \
    {                                               \
      typedef int32_t DType;                        \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kInt64:                             \
    {                                               \
      typedef int64_t DType;                        \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    LOG(FATAL) << "Unknown type enum " << type;     \
  }

/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }

#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

/* \brief Compute flattened index given coordinates and shape.
 */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) zeroes broadcast axes where shape[i] == 1 and
    // coord[i] may exceed it, instead of producing an out-of-range index.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >= 0; --i) {
    int tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i] without a second division
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
                                const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >= 0; --i) {
    int tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    // Stride 0 on size-1 axes makes broadcast reads free via dot().
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  // Carry-propagate: when an axis overflows its extent, reset it and bump
  // the next-more-significant axis, adjusting the flat index accordingly.
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  // Same carry-propagating increment as above, but tracks two flat indices
  // (e.g. an input and an output tensor with different strides) in lockstep.
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: raw copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
    } else {
      // Differing dtypes: element-wise cast while copying.
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op
   * (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar values */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};

template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth spawning threads: run serially.
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // UseOMP consults measured tuning data to decide whether threading pays
    // off for this op/type at this problem size.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Each OP::Map call receives (start, count) of a contiguous chunk; the
      // last chunk is clipped so start+count never exceeds N.
      const int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  // "Ex" variant: OP::Map takes (start, count); each thread handles count=1.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
GB_binop__bor_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bor_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bor_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bor_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bor_int32)
// C=scalar+B                       GB (_bind1st__bor_int32)
// C=scalar+B'                      GB (_bind1st_tran__bor_int32)
// C=A+scalar                       GB (_bind2nd__bor_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bor_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern?  0
// B type:   int32_t
// B pattern?  0

// BinaryOp: cij = (aij) | (bij)

// The GB_* macros below configure the generic templates (#include'd .c
// files) for this specific operator/type combination.

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_INT32 || GxB_NO_BOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion substitutes alpha/beta for entries missing from A/B.
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is always true if
        // Bb is NULL, i.e. B is full)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x) | (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__bor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij) | (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dpado.202001310955.no_bp_labeling.h
//
// Created by Zhen Peng on 1/6/20.
//
// Distributed batched labeling for unweighted shortest-distance queries.
// NOTE(review): "DistBVCPLL" presumably stands for Distributed
// Batched Vertex-Centric Pruned-Landmark-Labeling — confirm with the project
// documentation.  The class builds, via MPI across num_hosts processes and
// OpenMP within each process, a per-vertex label index L used to answer
// distance queries; this variant has bit-parallel labeling disabled
// (see the commented-out bit_parallel_labeling call in the constructor).

#ifndef PADO_DPADO_H
#define PADO_DPADO_H

#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"

namespace PADO {

// BATCH_SIZE: number of roots processed per labeling batch (compile-time).
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
    static const VertexID BITPARALLEL_SIZE = 50;  // number of bit-parallel roots/labels per vertex
    const inti THRESHOLD_PARALLEL = 0;            // work-size threshold below which sequential code paths are used

    // Structure for the type of label
    // Per-vertex label index: labels are grouped by batch, then by distance,
    // with the actual label vertices stored flat in `vertices`.
    struct IndexType {
        struct Batch {
//            VertexID batch_id; // Batch ID
            VertexID start_index; // Index to the array distances where the batch starts
            VertexID size; // Number of distances element in this batch

            Batch() = default;
            Batch(VertexID start_index_, VertexID size_):
                    start_index(start_index_), size(size_)
            { }
//            Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
//                    batch_id(batch_id_), start_index(start_index_), size(size_)
//            { }
        };

        struct DistanceIndexType {
            VertexID start_index; // Index to the array vertices where the same-distance vertices start
            VertexID size; // Number of the same-distance vertices
            UnweightedDist dist; // The real distance

            DistanceIndexType() = default;
            DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
                    start_index(start_index_), size(size_), dist(dist_)
            { }
        };

        // Bit-parallel Labels
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        std::vector<Batch> batches; // Batch info
        std::vector<DistanceIndexType> distances; // Distance info
        std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID

        // Heap + inline footprint of this label entry, in bytes
        // (batches are intentionally excluded — see commented line).
        size_t get_size_in_bytes() const
        {
            return sizeof(bp_dist)
                + sizeof(bp_sets) +
//                batches.size() * sizeof(Batch) +
                distances.size() * sizeof(DistanceIndexType) +
                vertices.size() * sizeof(VertexID);
        }

        // Release all index storage (swap with empty vectors actually frees capacity).
        void clean_all_indices()
        {
            std::vector<Batch>().swap(batches);
            std::vector<DistanceIndexType>().swap(distances);
            std::vector<VertexID>().swap(vertices);
        }
    }; //__attribute__((aligned(64)));

    // Per-vertex scratch state used while processing one batch of roots.
    struct ShortIndex {
        // I use BATCH_SIZE + 1 bit for indicator bit array.
        // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already.
        // In this way, it helps update_label_indices() and can be reset along with other indicator elements.
//        std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already
        // If the Batch structure is not used, the indicator could just be BATCH_SIZE long.
//        std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
        std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);

        // Use a queue to store candidates
        std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
        VertexID end_candidates_que = 0;  // number of valid entries in candidates_que
        std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0);  // membership flag for candidates_que

        // Clear every indicator flag (including the "got new labels" bit at index BATCH_SIZE).
        void indicator_reset()
        {
            std::fill(indicator.begin(), indicator.end(), 0);
        }
    }; //__attribute__((aligned(64)));

    // Type of Bit-Parallel Label
    struct BPLabelType {
        UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
        uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
    };

    // Type of Label Message Unit, for initializing distance table
    struct LabelTableUnit {
        VertexID root_id;           // root (batch-local) this distance belongs to
        VertexID label_global_id;   // global ID of the labeled vertex
        UnweightedDist dist;        // distance from root to the labeled vertex

        LabelTableUnit() = default;
        LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
            root_id(r), label_global_id(l), dist(d) {}
    };

    // Type of BitParallel Label Message Unit for initializing bit-parallel labels
    struct MsgBPLabel {
        VertexID r_root_id;
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2];

        MsgBPLabel() = default;
        MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
            : r_root_id(r)
        {
            memcpy(bp_dist, dist, sizeof(bp_dist));
            memcpy(bp_sets, sets, sizeof(bp_sets));
        }
    };

    VertexID num_v = 0;         // total number of vertices in the whole graph
    VertexID num_masters = 0;   // number of vertices mastered by this host
//    VertexID BATCH_SIZE = 0;
    int host_id = 0;            // this process's MPI rank
    int num_hosts = 0;          // total number of MPI processes
    MPI_Datatype V_ID_Type;     // MPI datatype matching VertexID
    std::vector<IndexType> L;   // label index, one entry per local master vertex

    // One relaxation step of the (parallel) bit-parallel BFS from a root:
    // scans v_global's local out-edges and records sibling/child edges and
    // newly discovered vertices into per-thread buffers at offset_tmp_q.
    inline void bit_parallel_push_labels(
            const DistGraph &G,
            VertexID v_global,
//            std::vector<VertexID> &tmp_que,
//            VertexID &end_tmp_que,
//            std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//            VertexID &num_sibling_es,
//            std::vector< std::pair<VertexID, VertexID> > &child_es,
//            VertexID &num_child_es,
            std::vector<VertexID> &tmp_q,
            VertexID &size_tmp_q,
            std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
            VertexID &size_tmp_sibling_es,
            std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
            VertexID &size_tmp_child_es,
            const VertexID &offset_tmp_q,
            std::vector<UnweightedDist> &dists,
            UnweightedDist iter);
    // Builds the BITPARALLEL_SIZE bit-parallel labels (unused in this
    // "no_bp_labeling" variant; the constructor's call is commented out).
    inline void bit_parallel_labeling(
            const DistGraph &G,
            std::vector<uint8_t> &used_bp_roots);
//    inline void bit_parallel_push_labels(
//            const DistGraph &G,
//            VertexID v_global,
//            std::vector<VertexID> &tmp_que,
//            VertexID &end_tmp_que,
//            std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//            VertexID &num_sibling_es,
//            std::vector< std::pair<VertexID, VertexID> > &child_es,
//            VertexID &num_child_es,
//            std::vector<UnweightedDist> &dists,
//            UnweightedDist iter);
//    inline void bit_parallel_labeling(
//            const DistGraph &G,
////            std::vector<IndexType> &L,
//            std::vector<uint8_t> &used_bp_roots);

    // Runs one batch of roots [roots_start, roots_start + roots_size):
    // initialization, iterative label pushing/inserting, and cleanup.
    inline void batch_process(
            const DistGraph &G,
//            const VertexID b_id,
            const VertexID roots_start,
            const VertexID roots_size,
            const std::vector<uint8_t> &used_bp_roots,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
//            std::vector<bool> &got_candidates,
            std::vector<uint8_t> &is_active,
//            std::vector<bool> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated);
//            std::vector<bool> &once_candidated);
    // Prepares per-batch state (queues, dist_table, bp_labels_table);
    // returns the number of active masters for this batch.
    inline VertexID initialization(
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
//            std::vector<bool> &once_candidated,
//            VertexID b_id,
            VertexID roots_start,
            VertexID roots_size,
//            std::vector<VertexID> &roots_master_local,
            const std::vector<uint8_t> &used_bp_roots);
//    inline void push_single_label(
//            VertexID v_head_global,
//            VertexID label_root_id,
//            VertexID roots_start,
//            const DistGraph &G,
//            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
//            std::vector<bool> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
//            std::vector<bool> &once_candidated,
//            const std::vector<BPLabelType> &bp_labels_table,
//            const std::vector<uint8_t> &used_bp_roots,
//            UnweightedDist iter);
    // Parallel driver that distributes label pushing over threads for the
    // active vertices in [global_start, global_start + global_size).
    inline void schedule_label_pushing_para(
            const DistGraph &G,
            const VertexID roots_start,
            const std::vector<uint8_t> &used_bp_roots,
            const std::vector<VertexID> &active_queue,
            const VertexID global_start,
            const VertexID global_size,
            const VertexID local_size,
//            const VertexID start_active_queue,
//            const VertexID size_active_queue,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            const std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const UnweightedDist iter);
    // Sequential label push from one head vertex to its local neighbors.
    inline void local_push_labels_seq(
            VertexID v_head_global,
            EdgeID start_index,
            EdgeID bound_index,
            VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
//            std::vector<bool> &got_candidates,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
//            std::vector<bool> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);
    // Parallel variant of local_push_labels_seq: results go to per-thread
    // temporary queues at offset_tmp_queue instead of the shared queues.
    inline void local_push_labels_para(
            const VertexID v_head_global,
            const EdgeID start_index,
            const EdgeID bound_index,
            const VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
            std::vector<VertexID> &tmp_got_candidates_queue,
            VertexID &size_tmp_got_candidates_queue,
            const VertexID offset_tmp_queue,
            std::vector<uint8_t> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
            std::vector<VertexID> &tmp_once_candidated_queue,
            VertexID &size_tmp_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);
//    inline void local_push_labels(
//            VertexID v_head_local,
//            VertexID roots_start,
//            const DistGraph &G,
//            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
//            std::vector<bool> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
//            std::vector<bool> &once_candidated,
//            const std::vector<BPLabelType> &bp_labels_table,
//            const std::vector<uint8_t> &used_bp_roots,
//            UnweightedDist iter);
    // Parallel driver that checks candidates (distance_query) and inserts
    // surviving labels; new (root, label) pairs are collected in buffer_send.
    inline void schedule_label_inserting_para(
            const DistGraph &G,
            const VertexID roots_start,
            const VertexID roots_size,
            std::vector<ShortIndex> &short_index,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            const std::vector<VertexID> &got_candidates_queue,
            const VertexID start_got_candidates_queue,
            const VertexID size_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<uint8_t> &is_active,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send,
            const VertexID iter);
    // Returns true if the candidate label (cand_root_id, v_id) is pruned,
    // i.e. an existing label pair already certifies a distance <= iter.
    inline bool distance_query(
            VertexID cand_root_id,
            VertexID v_id,
            VertexID roots_start,
//            const std::vector<IndexType> &L,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            UnweightedDist iter);
    // Appends the new label to v's index (sequential); the (root, label)
    // pair is queued in buffer_send for broadcasting to other hosts.
    inline void insert_label_only_seq(
            VertexID cand_root_id,
//            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
//            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send);
//            UnweightedDist iter);
    // Parallel variant: writes into a per-thread slice of tmp_buffer_send.
    inline void insert_label_only_para(
            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
//            std::vector< std::pair<VertexID, VertexID> > &buffer_send)
            std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
            EdgeID &size_tmp_buffer_send,
            const EdgeID offset_tmp_buffer_send);
    // Records the inserted_count labels of this iteration into v's
    // batch/distance index structures.
    inline void update_label_indices(
            const VertexID v_id,
            const VertexID inserted_count,
//            std::vector<IndexType> &L,
            std::vector<ShortIndex> &short_index,
//            VertexID b_id,
            const UnweightedDist iter);
    // Restores dist_table / recved_dist_table / bp_labels_table and the
    // once_candidated bookkeeping to their pristine state after a batch.
    inline void reset_at_end(
            const DistGraph &G,
//            VertexID roots_start,
//            const std::vector<VertexID> &roots_master_local,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            const std::vector<VertexID> &once_candidated_queue,
            const VertexID end_once_candidated_queue);
//    template <typename E_T, typename F>
//    inline void every_host_bcasts_buffer_and_proc(
//            std::vector<E_T> &buffer_send,
//            F &fun);
    // Broadcasts `root`'s buffer_send into every host's buffer_recv.
    template <typename E_T>
    inline void one_host_bcasts_buffer_to_buffer(
            int root,
            std::vector<E_T> &buffer_send,
            std::vector<E_T> &buffer_recv);
//    // Function: get the destination host id which is i hop from this host.
//    // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
//    // -1 hop from host 0 is host 2.
//    int hop_2_me_host_id(int hop) const
//    {
//        assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
//        return (host_id + hop + num_hosts) % num_hosts;
//    }
//    // Function: get the destination host id which is i hop from the root.
//    // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
//    // -1 hop from host 0 is host 2.
//    int hop_2_root_host_id(int hop, int root) const
//    {
//        assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
//        assert(root >= 0 && root < num_hosts);
//        return (root + hop + num_hosts) % num_hosts;
//    }

    // Total size in bytes of this host's label index (masters only).
    size_t get_index_size()
    {
        size_t bytes = 0;
        for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
            bytes += L[v_i].get_size_in_bytes();
        }
        return bytes;
    }

    // Test only
//    uint64_t normal_hit_count = 0;
//    uint64_t bp_hit_count = 0;
//    uint64_t total_check_count = 0;
//    uint64_t normal_check_count = 0;
//    uint64_t total_candidates_num = 0;
//    uint64_t set_candidates_num = 0;
//    double initializing_time = 0;
//    double candidating_time = 0;
//    double adding_time = 0;
//    double distance_query_time = 0;
//    double init_index_time = 0;
//    double init_dist_matrix_time = 0;
//    double init_start_reset_time = 0;
//    double init_indicators_time = 0;
    //L2CacheMissRate cache_miss;
//    double message_time = 0;
//    double bp_labeling_time = 0;
//    double initializing_time = 0;
//    double scatter_time = 0;
//    double gather_time = 0;
//    double clearup_time = 0;
//    TotalInstructsExe candidating_ins_count;
//    TotalInstructsExe adding_ins_count;
//    TotalInstructsExe bp_labeling_ins_count;
//    TotalInstructsExe bp_checking_ins_count;
//    TotalInstructsExe dist_query_ins_count;
    uint64_t caller_line = 0;  // debugging aid: records a caller's source line
    // End test

public:
//    std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
    DistBVCPLL() = default;
    // Builds the full label index for G (collective: every host must call this).
    explicit DistBVCPLL(
            const DistGraph &G);
//    UnweightedDist dist_distance_query_pair(
//            VertexID a_global,
//            VertexID b_global,
//            const DistGraph &G);
}; // class DistBVCPLL

// Constructor: runs the batched labeling over all num_v vertices,
// num_v / BATCH_SIZE full batches plus one remainder batch, then reports
// label-count and timing statistics via MPI reductions.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
        const DistGraph &G)
{
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE);
    num_masters = G.num_masters;
    host_id = G.host_id;
//    {
//        if (1 == host_id) {
//            volatile int i = 0;
//            while (i == 0) {
//                sleep(5);
//            }
//        }
//    }
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;
//    L.resize(num_v);
    L.resize(num_masters);
    VertexID remainer // (sic: "remainder") — statement continues on the next line
            = num_v % BATCH_SIZE; // roots left over after the full batches
    VertexID b_i_bound = num_v / BATCH_SIZE; // number of full batches
    std::vector<uint8_t> used_bp_roots(num_v, 0); // marks vertices taken as bit-parallel roots (unused here: BP labeling is disabled)

    //cache_miss.measure_start();
    double time_labeling = -WallTimer::get_time_mark();

    // Bit-parallel labeling is intentionally disabled in this variant.
//    bp_labeling_time -= WallTimer::get_time_mark();
//    bit_parallel_labeling(G,
//            used_bp_roots);
//    bp_labeling_time += WallTimer::get_time_mark();
    {//test
//#ifdef DEBUG_MESSAGES_ON
        if (0 == host_id) {
            printf("host_id: %u bp_labeling_finished.\n", host_id);
        }
//#endif
    }

    // Shared per-batch working state, allocated once and reused by every batch.
    std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
//    std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
    std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
//    std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    // dist_table[r][v]: current known distance from batch root r to global vertex v.
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
    std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
    // Used mainly for resetting short_index[v].indicator.
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
//    std::vector<bool> once_candidated(num_masters, false);
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels

    // Process the full batches of BATCH_SIZE roots each.
    //printf("b_i_bound: %u\n", b_i_bound);//test
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//        {
////#ifdef DEBUG_MESSAGES_ON
//            if (b_i % 4000 == 0 && 0 == host_id) {
            if (0 == host_id) {
                printf("b_i: %u\n", b_i);//test
            }
////#endif
//        }
        batch_process(
                G,
//                b_i,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
//        exit(EXIT_SUCCESS); //test
    }
    // Process the remainder batch (fewer than BATCH_SIZE roots), if any.
    if (remainer != 0) {
//        {
////#ifdef DEBUG_MESSAGES_ON
//            if (0 == host_id) {
//                printf("b_i: %u\n", b_i_bound);//test
//            }
////#endif
//        }
        batch_process(
                G,
//                b_i_bound,
                b_i_bound * BATCH_SIZE,
                remainer,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    time_labeling += WallTimer::get_time_mark();
    //cache_miss.measure_stop();

    // Test
    // Statistics reporting: label counts and timing, reduced across hosts.
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u ", BATCH_SIZE);
        printf("BP_Size: %u THRESHOLD_PARALLEL: %u\n", BITPARALLEL_SIZE, THRESHOLD_PARALLEL);
    }
    {// Total Number of Labels
        EdgeID local_num_labels = 0;
        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
            if (G.get_master_host_id(v_global) != host_id) {
                continue;
            }
            local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
        }
        EdgeID global_num_labels;
        MPI_Allreduce(&local_num_labels,
                &global_num_labels,
                1,
                MPI_Instance::get_mpi_datatype<EdgeID>(),
                MPI_SUM,
                MPI_COMM_WORLD);
//        printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
        MPI_Barrier(MPI_COMM_WORLD);
        if (0 == host_id) {
            printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
        }
//        VertexID local_num_batches = 0;
//        VertexID local_num_distances = 0;
////        double local_avg_distances_per_batches = 0;
//        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
//            if (G.get_master_host_id(v_global) != host_id) {
//                continue;
//            }
//            VertexID v_local = G.get_local_vertex_id(v_global);
//            local_num_batches += L[v_local].batches.size();
//            local_num_distances += L[v_local].distances.size();
////            double avg_d_p_b = 0;
////            for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) {
////                avg_d_p_b += L[v_local].batches[i_b].size;
////            }
////            avg_d_p_b /= L[v_local].batches.size();
////            local_avg_distances_per_batches += avg_d_p_b;
//        }
////        local_avg_distances_per_batches /= num_masters;
////        double local_avg_batches = local_num_batches * 1.0 / num_masters;
////        double local_avg_distances = local_num_distances * 1.0 / num_masters;
//        uint64_t global_num_batches = 0;
//        uint64_t global_num_distances = 0;
//        MPI_Allreduce(
//                &local_num_batches,
//                &global_num_batches,
//                1,
//                MPI_UINT64_T,
//                MPI_SUM,
//                MPI_COMM_WORLD);
////        global_avg_batches /= num_hosts;
//        MPI_Allreduce(
//                &local_num_distances,
//                &global_num_distances,
//                1,
//                MPI_UINT64_T,
//                MPI_SUM,
//                MPI_COMM_WORLD);
////        global_avg_distances /= num_hosts;
//        double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches;
//        double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances;
//        double global_avg_batches = global_num_batches / num_v;
//        double global_avg_distances = global_num_distances / num_v;
////        MPI_Allreduce(
////                &local_avg_distances_per_batches,
////                &global_avg_d_p_b,
////                1,
////                MPI_DOUBLE,
////                MPI_SUM,
////                MPI_COMM_WORLD);
////        global_avg_d_p_b /= num_hosts;
//        MPI_Barrier(MPI_COMM_WORLD);
//        if (0 == host_id) {
//            printf("global_avg_batches: %f "
//                   "global_avg_distances: %f "
//                   "global_avg_distances_per_batch: %f "
//                   "global_avg_labels_per_distance: %f\n",
//                    global_avg_batches,
//                    global_avg_distances,
//                    global_avg_d_p_b,
//                    global_avg_l_p_d);
//        }
    }
//    printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
//    printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
//        printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
//        printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
//            printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
//        printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
//    printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
//    printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
//        printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
//        uint64_t total_check_count = bp_hit_count + normal_check_count;
//        printf("total_check_count: %'llu\n", total_check_count);
//        printf("bp_hit_count: %'llu %.2f%%\n",
//                bp_hit_count,
//                bp_hit_count * 100.0 / total_check_count);
//        printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
//        printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
//                total_candidates_num,
//                set_candidates_num,
//                set_candidates_num * 100.0 / total_candidates_num);
//        printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
//                normal_hit_count,
//                normal_hit_count * 100.0 / total_check_count,
//                normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
    //cache_miss.print();
//    printf("Candidating: "); candidating_ins_count.print();
//    printf("Adding: "); adding_ins_count.print();
//    printf("BP_Labeling: "); bp_labeling_ins_count.print();
//    printf("BP_Checking: "); bp_checking_ins_count.print();
//    printf("distance_query: "); dist_query_ins_count.print();

//    if (0 == host_id) {
//        printf("num_hosts: %u host_id: %u\n"
//               "Local_labeling_time: %.2f seconds\n"
//               "bp_labeling_time: %.2f %.2f%%\n"
//               "initializing_time: %.2f %.2f%%\n"
//               "scatter_time: %.2f %.2f%%\n"
//               "gather_time: %.2f %.2f%%\n"
//               "clearup_time: %.2f %.2f%%\n"
//               "message_time: %.2f %.2f%%\n",
//               num_hosts, host_id,
//               time_labeling,
//               bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
//               initializing_time, 100.0 * initializing_time / time_labeling,
//               scatter_time, 100.0 * scatter_time / time_labeling,
//               gather_time, 100.0 * gather_time / time_labeling,
//               clearup_time, 100.0 * clearup_time / time_labeling,
//               message_time, 100.0 * message_time / time_labeling);
//    }
    // Report the slowest host's labeling time (MPI_MAX reduction).
    double global_time_labeling;
    MPI_Allreduce(&time_labeling, &global_time_labeling, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("num_hosts: %d "
               "num_threads: %d "
               "Global_labeling_time: %.2f seconds\n",
               num_hosts,
               NUM_THREADS,
               global_time_labeling);
    }
    // End test
}

// Retired single-host implementation of bit_parallel_labeling, kept for reference.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
//        const DistGraph &G,
//        std::vector<uint8_t> &used_bp_roots)
//{
////    VertexID num_v = G.num_v;
//    EdgeID num_e = G.num_e;
//
//    std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//    std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//    std::vector<VertexID> que(num_v); // active queue
//    std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//    std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
// (Continuation of the retired single-host bit_parallel_labeling, kept for reference.)
//
//    VertexID r = 0; // root r
//    for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
//        while (r < num_v && used_bp_roots[r]) {
//            ++r;
//        }
//        if (r == num_v) {
//            for (VertexID v = 0; v < num_v; ++v) {
//                L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
//            }
//            continue;
//        }
//        used_bp_roots[r] = true;
//
//        fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
//        fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
//        VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
//        que[que_h++] = r;
//        tmp_d[r] = 0;
//        que_t1 = que_h;
//
//        int ns = 0; // number of selected neighbor, default 64
//        // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
//        // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
////        VertexID i_bound = G.vertices[r] - 1;
////        VertexID i_start = i_bound + G.out_degrees[r];
////        for (VertexID i = i_start; i > i_bound; --i) {
//        //int i_bound = G.vertices[r];
//        //int i_start = i_bound + G.out_degrees[r] - 1;
//        //for (int i = i_start; i >= i_bound; --i) {
//        VertexID d_i_bound = G.local_out_degrees[r];
//        EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
//        for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
//            EdgeID i = i_start - d_i;
//            VertexID v = G.out_edges[i];
//            if (!used_bp_roots[v]) {
//                used_bp_roots[v] = true;
//                // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
//                que[que_h++] = v;
//                tmp_d[v] = 1;
//                tmp_s[v].first = 1ULL << ns;
//                if (++ns == 64) break;
//            }
//        }
//        //}
////        }
//
//        for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
//            VertexID num_sibling_es = 0, num_child_es = 0;
//
//            for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
//                VertexID v = que[que_i];
////                bit_parallel_push_labels(G,
////                        v,
////                        que,
////                        que_h,
////                        sibling_es,
////                        num_sibling_es,
////                        child_es,
////                        num_child_es,
////                        tmp_d,
////                        d);
//                EdgeID i_start = G.vertices_idx[v];
//                EdgeID i_bound = i_start + G.local_out_degrees[v];
//                for (EdgeID i = i_start; i < i_bound; ++i) {
//                    VertexID tv = G.out_edges[i];
//                    UnweightedDist td = d + 1;
//
//                    if (d > tmp_d[tv]) {
//                        ;
//                    }
//                    else if (d == tmp_d[tv]) {
//                        if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
//                            sibling_es[num_sibling_es].first  = v;
//                            sibling_es[num_sibling_es].second = tv;
//                            ++num_sibling_es;
//                        }
//                    } else { // d < tmp_d[tv]
//                        if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
//                            que[que_h++] = tv;
//                            tmp_d[tv] = td;
//                        }
//                        child_es[num_child_es].first  = v;
//                        child_es[num_child_es].second = tv;
//                        ++num_child_es;
//                    }
//                }
//            }
//
//            for (VertexID i = 0; i < num_sibling_es; ++i) {
//                VertexID v = sibling_es[i].first, w = sibling_es[i].second;
//                tmp_s[v].second |= tmp_s[w].first;
//                tmp_s[w].second |= tmp_s[v].first;
//            }
//            for (VertexID i = 0; i < num_child_es; ++i) {
//                VertexID v = child_es[i].first, c = child_es[i].second;
//                tmp_s[c].first  |= tmp_s[v].first;
//                tmp_s[c].second |= tmp_s[v].second;
//            }
//
//            {// test
//                printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
////                if (4 == d) {
////                    exit(EXIT_SUCCESS);
////                }
//            }
//
//            que_t0 = que_t1;
//            que_t1 = que_h;
//        }
//
//        for (VertexID v = 0; v < num_v; ++v) {
//            L[v].bp_dist[i_bpspt] = tmp_d[v];
//            L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
//            L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
//        }
//    }
//
//}

// One relaxation step of the BFS at level `iter` from a bit-parallel root:
// scans v_global's local out-edges and classifies each edge (v, tv) as a
// sibling edge (dists equal at `iter`) or a child edge (tv one level deeper).
// Newly discovered vertices are claimed via CAS on dists and appended to
// tmp_q.  All outputs go into this thread's slice of the shared buffers,
// starting at offset_tmp_q; the caller later merges the per-thread slices.
// NOTE(review): CAS is a project-provided compare-and-swap (from globals) —
// it makes discovery safe when several threads relax edges into the same
// target concurrently; sibling/child counters themselves are per-thread.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
        const DistGraph &G,
        const VertexID v_global,
//        std::vector<VertexID> &tmp_que,
//        VertexID &end_tmp_que,
//        std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//        VertexID &num_sibling_es,
//        std::vector< std::pair<VertexID, VertexID> > &child_es,
//        VertexID &num_child_es,
        std::vector<VertexID> &tmp_q,
        VertexID &size_tmp_q,
        std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
        VertexID &size_tmp_sibling_es,
        std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
        VertexID &size_tmp_child_es,
        const VertexID &offset_tmp_q,
        std::vector<UnweightedDist> &dists,
        const UnweightedDist iter)
{
    EdgeID i_start = G.vertices_idx[v_global];
    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//    {//test
//        printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//    }
    for (EdgeID i = i_start; i < i_bound; ++i) {
        VertexID tv_global = G.out_edges[i];
        VertexID tv_local = G.get_local_vertex_id(tv_global);
        UnweightedDist td = iter + 1;

        if (iter > dists[tv_local]) {
            ; // tv is strictly closer to the root: nothing to propagate
        } else if (iter == dists[tv_local]) {
            // Same level: record the sibling edge once (smaller endpoint wins).
            if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
                ++size_tmp_sibling_es;
//                sibling_es[num_sibling_es].first = v_global;
//                sibling_es[num_sibling_es].second = tv_global;
//                ++num_sibling_es;
            }
        } else { // iter < dists[tv]
            // Deeper level: claim tv exactly once across threads via CAS,
            // then record the child (parent -> child) edge.
            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
                if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
                    tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
                }
            }
//            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
//                tmp_que[end_tmp_que++] = tv_global;
//                dists[tv_local] = td;
//            }
            tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
            tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
            ++size_tmp_child_es;
//            child_es[num_child_es].first = v_global;
//            child_es[num_child_es].second = tv_global;
//            ++num_child_es;
        }
    }
}

// Distributed bit-parallel labeling: selects up to BITPARALLEL_SIZE roots
// (plus up to 64 neighbors each) and runs bit-parallel BFSs, exchanging
// frontier and set information between hosts with MPI.
// (Definition continues below.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
        const DistGraph &G,
//        std::vector<IndexType> &L,
        std::vector<uint8_t> &used_bp_roots)
{
    // Class type of Bit-Parallel label message unit.
struct MsgUnitBP { VertexID v_global; uint64_t S_n1; uint64_t S_0; MsgUnitBP() = default; // MsgUnitBP(MsgUnitBP&& other) = default; // MsgUnitBP(MsgUnitBP& other) = default; // MsgUnitBP& operator=(const MsgUnitBP& other) = default; // MsgUnitBP& operator=(MsgUnitBP&& other) = default; MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) : v_global(v), S_n1(sn1), S_0(s0) { } }; // VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; EdgeID local_num_edges = G.num_edges_local; std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<VertexID> que(num_masters); // active queue VertexID end_que = 0; std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que VertexID end_tmp_que = 0; std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. VertexID r_global = 0; // root r for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // {// test // if (0 == host_id) { // printf("i_bpsp: %u\n", i_bpspt); // } // } // Select the root r_global if (0 == host_id) { while (r_global < num_v && used_bp_roots[r_global]) { ++r_global; } if (r_global == num_v) { for (VertexID v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; } continue; } } // Broadcast the r here. 
// message_time -= WallTimer::get_time_mark(); MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // Mark the r_global if (G.get_master_host_id(r_global) == host_id) { tmp_d[G.get_local_vertex_id(r_global)] = 0; que[end_que++] = r_global; } // Select the r_global's 64 neighbors { // Get r_global's neighbors into buffer_send, rank from high to low. VertexID local_degree = G.local_out_degrees[r_global]; std::vector<VertexID> buffer_send(local_degree); if (local_degree) { EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; for (VertexID d_i = 0; d_i < local_degree; ++d_i) { EdgeID e_i = e_i_start - d_i; buffer_send[d_i] = G.out_edges[e_i]; } } // Get selected neighbors (up to 64) std::vector<VertexID> selected_nbrs; if (0 != host_id) { // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // Receive selected neighbors from host 0 MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); } else { // Host 0 // Host 0 receives neighbors from others std::vector<VertexID> all_nbrs(buffer_send); std::vector<VertexID > buffer_recv; for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); if (buffer_recv.empty()) { continue; } buffer_send.resize(buffer_send.size() 
+ buffer_recv.size()); std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); all_nbrs.resize(buffer_send.size()); all_nbrs.assign(buffer_send.begin(), buffer_send.end()); } assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // Select 64 (or less) neighbors VertexID ns = 0; // number of selected neighbor, default 64 for (VertexID v_global : all_nbrs) { if (used_bp_roots[v_global]) { continue; } used_bp_roots[v_global] = 1; selected_nbrs.push_back(v_global); if (++ns == 64) { break; } } // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); for (int dest = 1; dest < num_hosts; ++dest) { MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); } // message_time += WallTimer::get_time_mark(); } // {//test // printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); // } // Synchronize the used_bp_roots. for (VertexID v_global : selected_nbrs) { used_bp_roots[v_global] = 1; } // Mark selected neighbors for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { VertexID v_global = selected_nbrs[v_i]; if (host_id != G.get_master_host_id(v_global)) { continue; } tmp_que[end_tmp_que++] = v_global; tmp_d[G.get_local_vertex_id(v_global)] = 1; tmp_s[v_global].first = 1ULL << v_i; } } // Reduce the global number of active vertices VertexID global_num_actives = 1; UnweightedDist d = 0; while (global_num_actives) { // {// Limit the distance // if (d > 7) { // break; // } // } //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("d: %u que_size: %u\n", d, global_num_actives); // } // } //#endif // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { VertexID num_sibling_es = 0, num_child_es = 0; // Send active masters to mirrors { std::vector<MsgUnitBP> buffer_send(end_que); for (VertexID que_i = 0; que_i < end_que; ++que_i) { VertexID v_global = que[que_i]; buffer_send[que_i] = 
MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); } // {// test // printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); // } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgUnitBP> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } // For parallel adding to queue VertexID size_buffer_recv = buffer_recv.size(); std::vector<VertexID> offsets_tmp_q(size_buffer_recv); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) { offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global]; } VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q); std::vector<VertexID> tmp_q(num_neighbors); std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0); // For parallel adding to sibling_es std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors); std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0); // For parallel adding to child_es std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors); std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0); #pragma omp parallel for // for (const MsgUnitBP &m : buffer_recv) { for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgUnitBP &m = buffer_recv[i_m]; VertexID v_global = m.v_global; if (!G.local_out_degrees[v_global]) { continue; } tmp_s[v_global].first = m.S_n1; tmp_s[v_global].second = m.S_0; // Push labels bit_parallel_push_labels( G, v_global, tmp_q, sizes_tmp_q[i_m], tmp_sibling_es, sizes_tmp_sibling_es[i_m], tmp_child_es, sizes_tmp_child_es[i_m], offsets_tmp_q[i_m], // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, tmp_d, d); } {// From tmp_sibling_es to sibling_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es); PADO::collect_into_queue( tmp_sibling_es, offsets_tmp_q, sizes_tmp_sibling_es, total_size_tmp, sibling_es, num_sibling_es); } {// 
From tmp_child_es to child_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es); PADO::collect_into_queue( tmp_child_es, offsets_tmp_q, sizes_tmp_child_es, total_size_tmp, child_es, num_child_es); } {// From tmp_q to tmp_que idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q); PADO::collect_into_queue( tmp_q, offsets_tmp_q, sizes_tmp_q, total_size_tmp, tmp_que, end_tmp_que); } // {// test // printf("host_id: %u root: %u done push.\n", host_id, root); // } } } // Update the sets in tmp_s { #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first, w = sibling_es[i].second; __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST); // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; } // Put into the buffer sending to others std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first; VertexID w = sibling_es[i].second; buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); } // Send the messages for (int root = 0; root < num_hosts; ++root) { std::vector< std::pair<VertexID, uint64_t> > buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } size_t i_m_bound = buffer_recv.size(); #pragma omp parallel for for (size_t i_m = 0; i_m < i_m_bound; ++i_m) { const auto &m = buffer_recv[i_m]; __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST); } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } } #pragma omp parallel for for (VertexID i = 0; i < num_child_es; ++i) { VertexID v = child_es[i].first, c = child_es[i].second; 
__atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST); // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; } } //#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // &global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // //// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (0 == d) { //// exit(EXIT_SUCCESS); //// } // } //#endif // Swap que and tmp_que tmp_que.swap(que); end_que = end_tmp_que; end_tmp_que = 0; // message_time -= WallTimer::get_time_mark(); MPI_Allreduce(&end_que, &global_num_actives, 1, V_ID_Type, MPI_MAX, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } ++d; } #pragma omp parallel for for (VertexID v_local = 0; v_local < num_masters; ++v_local) { VertexID v_global = G.get_global_vertex_id(v_local); L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_push_labels( // const DistGraph &G, // const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // 
const UnweightedDist iter) //{ // EdgeID i_start = G.vertices_idx[v_global]; // EdgeID i_bound = i_start + G.local_out_degrees[v_global]; //// {//test //// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. 
// struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. 
// VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // 
used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. // for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } 
// tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // 
&global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
-1 : 0;
//            if (td <= iter) {
////                ++bp_hit_count;
//                return false;
//            }
//        }
//    }
//    return true;
//}

// Function for initializing at the beginning of a batch.
// For a batch, initialize the temporary labels and real labels of roots;
// traverse roots' labels to initialize the distance buffer (dist_table);
// unset the flag arrays is_active and got_labels.
// Returns the global (over all hosts, reduced with MPI_MAX) number of active
// vertices that seed this batch.
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
        VertexID roots_start,
        VertexID roots_size,
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)
{
    // Get the roots_master_local, containing all local roots: roots in
    // [roots_start, roots_start + roots_size) mastered by this host and not
    // already consumed as bit-parallel roots.
    std::vector<VertexID> roots_master_local;
    VertexID size_roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    try {
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
                roots_master_local.push_back(G.get_local_vertex_id(r_global));
//                {//test
//                    if (1024 == roots_start && 7 == host_id && 31600 == *roots_master_local.rbegin()) {
//                        printf("S0.0 host_id: %d "
//                               "31600 YES!\n",
//                               host_id);
//                    }
//                }
            }
        }
        size_roots_master_local = roots_master_local.size();
    } catch (const std::bad_alloc &) {
        // Out of memory: report the host's memory state and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_roots_master_local: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }

    // Short_index: reset indicators of vertices touched in the previous
    // batch, then mark every local root as covering itself.
    // (Each phase has twin branches: OpenMP-parallel when the work amount
    // reaches THRESHOLD_PARALLEL, plain serial otherwise.)
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }
//
    // Real Index: every local root gets a label entry for itself at
    // distance 0 (a new 1-element batch/distance/vertex record in L).
    try {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
//                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
//                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    } catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_real_index: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }

    // Dist Table: flatten every local root's existing labels into
    // (root_id, label_global_id, dist) triples, broadcast them host by host,
    // and fill dist_table; recved_dist_table remembers what was written so a
    // later pass can reset only those entries.
    try {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Deprecated Old method: unpack the IndexType structure before sending.
            // Okay, it's back.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel
                // (NOTE(review): "beffer" is a long-standing typo for "buffer").
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0;
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//                    VertexID dist_bound_index = Lr.distances.size();
//                    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
//                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
//                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
                            }
                        }
                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//                    VertexID dist_bound_index = Lr.distances.size();
//                    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i], dist); // buffer for sending
//                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
//                                        dist); // buffer for sending
                            }
                        }
                    }
                }
            }
        }
        // Broadcast local roots labels
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                    buffer_send,
                    buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record root_id's number of its received label, for later adding to recved_dist_table
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Record the received label in recved_dist_table, for later reset
                // (counts become capacities; the counter is reused as a
                // thread-safe append cursor by TS_enqueue below).
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        size = 0;
                    }
                }
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    } catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_dist_table: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    {
        // No-op touch of bp_labels_table — presumably kept to silence an
        // unused-parameter warning while the build below is disabled; confirm
        // before removing the parameter.
        if (bp_labels_table.size() != 0) {
            ;
        }
    }
//    // Build the Bit-Parallel Labels Table
//    try
//    {
////        struct MsgBPLabel {
////            VertexID r_root_id;
////            UnweightedDist bp_dist[BITPARALLEL_SIZE];
////            uint64_t bp_sets[BITPARALLEL_SIZE][2];
////
////            MsgBPLabel() = default;
////            MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
////                    : r_root_id(r)
////            {
////                memcpy(bp_dist, dist, sizeof(bp_dist));
////                memcpy(bp_sets, sets, sizeof(bp_sets));
////            }
////        };
////        std::vector<MPI_Request> requests_send(num_hosts - 1);
//        std::vector<MsgBPLabel> buffer_send;
//
//        std::vector<VertexID> roots_queue;
//        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
//            if (G.get_master_host_id(r_global) != host_id) {
//                continue;
//            }
//            roots_queue.push_back(r_global);
//        }
//        VertexID size_roots_queue = roots_queue.size();
//        if (size_roots_queue >= THRESHOLD_PARALLEL) {
//            buffer_send.resize(size_roots_queue);
//#pragma omp parallel for
//            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
//                VertexID r_global = roots_queue[i_r];
//                VertexID r_local = G.get_local_vertex_id(r_global);
//                VertexID r_root = r_global - roots_start;
//                // Prepare for sending
////                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
//                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
//            }
//        } else {
////            for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
////                if (G.get_master_host_id(r_global) != host_id) {
////                    continue;
////                }
//            for (VertexID r_global : roots_queue) {
//                VertexID r_local = G.get_local_vertex_id(r_global);
//                VertexID r_root = r_global - roots_start;
//                // Local roots
////                memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
////                memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
//                // Prepare for sending
//                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
//            }
//        }
//
//        for (int root = 0; root < num_hosts; ++root) {
//            std::vector<MsgBPLabel> buffer_recv;
//            one_host_bcasts_buffer_to_buffer(root,
//                    buffer_send,
//                    buffer_recv);
//            if (buffer_recv.empty()) {
//                continue;
//            }
//            VertexID size_buffer_recv = buffer_recv.size();
//            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
//#pragma omp parallel for
//                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
//                    const MsgBPLabel &m = buffer_recv[i_m];
//                    VertexID r_root = m.r_root_id;
//                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
//                }
//            } else {
//                for (const MsgBPLabel &m : buffer_recv) {
//                    VertexID r_root = m.r_root_id;
//                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
//                }
//            }
//        }
//    }
//    catch (const std::bad_alloc &) {
//        double memtotal = 0;
//        double memfree = 0;
//        PADO::Utils::system_memory(memtotal, memfree);
//        printf("initialization_bp_labels_table: bad_alloc "
//               "host_id: %d "
//               "L.size(): %.2fGB "
//               "memtotal: %.2fGB "
//               "memfree: %.2fGB\n",
//               host_id,
//               get_index_size() * 1.0 / (1 << 30),
//               memtotal / 1024,
//               memfree / 1024);
//        exit(1);
//    }

    // Active_queue: seed the batch with every local root and agree on the
    // global activity level.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
            {
            }
        }
        // Get the global number of active vertices;
        // NOTE(review): reduced with MPI_MAX (MPI_SUM is commented out), so
        // the return value is the maximum per-host count, not a global total —
        // callers appear to use it only as a zero/non-zero test; confirm.
//        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                &global_num_actives,
                1,
                V_ID_Type,
//                MPI_SUM,
                MPI_MAX,
                MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
    }

    return global_num_actives;
}

// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
//        const DistGraph &G,
//        std::vector<ShortIndex> &short_index,
//        std::vector<
std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated, // VertexID b_id, // VertexID roots_start, // VertexID roots_size, //// std::vector<VertexID> &roots_master_local, // const std::vector<uint8_t> &used_bp_roots) //{ // // Get the roots_master_local, containing all local roots. // std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct 
LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. // for (VertexID r_local : roots_master_local) { // // The distance table. // IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) 
{ // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; //// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // Active_queue // 
VertexID global_num_actives = 0; // global number of active vertices. // { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
// return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } //// {// Just for the complain from the compiler //// assert(iter >= iter); //// } //} template<VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: schedule_label_pushing_para( const DistGraph &G, const VertexID roots_start, const std::vector<uint8_t> &used_bp_roots, const std::vector<VertexID> &active_queue, const VertexID global_start, const VertexID global_size, const VertexID local_size, // const VertexID start_active_queue, // const VertexID size_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, const std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, std::vector<uint8_t> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const UnweightedDist iter) { std::vector<std::pair<VertexID, VertexID> > buffer_send_indices; //.first: Vertex ID //.second: size of labels std::vector<VertexID> buffer_send_labels; if (local_size) { const VertexID start_active_queue = global_start; const VertexID size_active_queue = global_size <= local_size ? 
                global_size : local_size;
        // ---- Phase 1: pack this host's newly inserted labels into flat send buffers. ----
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting:
        // per-vertex label counts; prefix_sum_for_offsets() converts them in place
        // into exclusive write offsets so each OpenMP thread owns a disjoint slice.
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            // Count of labels added in the most recent iteration
            // (the last element of Lv.distances describes the newest run).
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
        try {
            buffer_send_labels.resize(size_buffer_send_labels);
        } catch (const std::bad_alloc &) {
            // Out of memory: print diagnostics (index size, system memory) and abort the run.
            // NOTE(review): "%lu" assumes EdgeID is unsigned long on this platform -- confirm.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
                   "host_id: %d "
                   "size_buffer_send_labels: %lu "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   host_id,
                   size_buffer_send_labels,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
        // Build buffer_send_labels by parallel inserting:
        // each thread writes into its precomputed [offset, offset + size) slice, so no locking.
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active (idempotent; also done in the loop above)
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices (.first: global vertex ID, .second: label count)
            VertexID tmp_i_q = i_q - start_active_queue;
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 index are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                // Labels are sent as root-relative IDs (global ID minus roots_start).
                VertexID label_root_id = Lv.vertices[l_i] - roots_start;
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
//    {
//        if (1024 == roots_start) {
//            printf("host_id: %d "
//                   "in pushing\n",
//                   host_id);
//        }
//    }
////////////////////////////////////////////////
////
//    const VertexID bound_active_queue = start_active_queue + size_active_queue;
//    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
//        //.first: Vertex ID
//        //.second: size of labels
//    std::vector<VertexID> buffer_send_labels;
//    // Prepare masters' newly added labels for sending
//    // Parallel Version
//    // Prepare offset for inserting
//    std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        const IndexType &Lv = L[v_head_local];
//        offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
//    }
//    EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
////    {// test
////        if (0 == host_id) {
////            double memtotal = 0;
////            double memfree = 0;
////            double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
////            PADO::Utils::system_memory(memtotal, memfree);
////            printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
////                   bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
////        }
////    }
//    buffer_send_labels.resize(size_buffer_send_labels);
////    {// test
////        if (0 == host_id) {
////            printf("buffer_send_labels created.\n");
////        }
////    }
//
//    // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID tmp_i_q = i_q - start_active_queue;
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//        const IndexType &Lv = L[v_head_local];
//        // Prepare the buffer_send_indices
//        buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//        // These 2 index are used for traversing v_head's last inserted labels
//        VertexID l_i_start = Lv.distances.rbegin()->start_index;
//        VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//        VertexID top_labels = offsets_buffer_locs[tmp_i_q];
//        for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send_labels[top_labels++] = label_root_id;
////            buffer_send_labels.push_back(label_root_id);
//        }
//    }
////    end_active_queue = 0;
////
////////////////////////////////////////////////
    // ---- Phase 2: round-robin broadcast. Every host in turn broadcasts its two buffers;
    // every host (including the sender) unpacks each round and pushes the received labels
    // to the local out-neighbors of each sending vertex. ----
    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        {// Leftover debug tracing: only fires for batch roots_start == 1024, broadcaster 6.
            if (1024 == roots_start && 6 == root) {
                printf("host_id: %u "
                       "root: %d "
                       "buffer_send_indices.size(): %lu "
                       "buffer_send_labels.size(): %lu \n",
                       host_id,
                       root,
                       buffer_send_indices.size(),
                       buffer_send_labels.size());
            }
            // Record the call site so the broadcast helper can report where it was invoked from.
            caller_line = __LINE__;
        }
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        {
            if (1024 == roots_start && 6 == root) {
                printf("host_id: %u "
                       "root: %d "
                       "indices_buffer.size(): %lu \n",
                       host_id,
                       root,
                       indices_buffer.size());
            }
            caller_line = __LINE__;
        }
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        {
            if (1024 == roots_start && 6 == root) {
                printf("host_id: %u "
                       "root: %d "
                       "labels_buffer.size(): %lu \n",
                       host_id,
                       root,
                       labels_buffer.size());
            }
        }
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer:
        // prefix sum of per-sender label counts = start position of each sender's labels.
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID>
                    &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue:
        // each sender gets a slice sized by its local out-degree, so workers never collide.
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue;
        std::vector<VertexID> sizes_tmp_got_candidates_queue;
        std::vector<VertexID> tmp_once_candidated_queue;
        std::vector<VertexID> sizes_tmp_once_candidated_queue;
        try {
            tmp_got_candidates_queue.resize(num_ngbrs);
            sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
            tmp_once_candidated_queue.resize(num_ngbrs);
            sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
        } catch (const std::bad_alloc &) {
            // Out of memory while sizing the per-sender scratch queues: report and abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
                   "host_id: %d "
                   "num_ngbrs: %lu "
                   "size_indices_buffer: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   host_id,
                   num_ngbrs,
                   size_indices_buffer,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
        // Push each sender's labels to its local neighbors. New candidates land in the
        // sender's own slice of the tmp queues; shared per-vertex flags use CAS inside.
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            EdgeID start_index = starts_locs_index[i_i];
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
                    starts_locs_index[i_i + 1] : total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//                        std::vector<VertexID> &got_candidates_queue,
//                        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//                        std::vector<VertexID> &once_candidated_queue,
//                        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Parallel worker used by schedule_label_pushing_para(): scans
// labels_buffer[start_index, bound_index) -- v_head_global's newly added labels as
// root-relative IDs -- and offers each label to every local out-neighbor of v_head_global.
// Newly affected neighbors are appended to this worker's private slice (starting at
// offset_tmp_queue) of tmp_got_candidates_queue / tmp_once_candidated_queue, with the
// per-slice size counters advanced accordingly. Runs concurrently with other workers,
// so all shared per-vertex flags are claimed via compare-and-swap.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,
        const EdgeID start_index,
        const EdgeID bound_index,
        const VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,
        const VertexID offset_tmp_queue,
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    {
        // Touch bp_labels_table only to silence the unused-parameter warning;
        // the bit-parallel pruning that used it is commented out below.
        if (bp_labels_table.size() != 0) {
            ;
        }
    }
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) {
            // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            // NOTE(review): this early return assumes the remaining neighbors in out_edges are
            // also ranked above all roots (i.e., the adjacency list is rank-ordered) -- confirm
            // against DistGraph construction.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
//        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition: CAS claims the indicator bit exactly once
             // across all concurrently pushing workers.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in.
                // Only the CAS winner appends, so the vertex is enqueued at most once.
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
//            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
////            const IndexType &L_label = L[label_global_id];
////            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
////            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//            const BPLabelType &L_label = bp_labels_table[label_root_id];
//            bool no_need_add = false;
//            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
//                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
//                if (td - 2 <= iter) {
//                    td +=
//                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
//                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
//                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
//                                    ? -1 : 0;
//                    if (td <= iter) {
//                        no_need_add = true;
//                        break;
//                    }
//                }
//            }
//            if (no_need_add) {
//                continue;
//            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            // Claim the candidate flag, then thread-safely append to the shared candidate list.
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                // CAS winner appends into this worker's private slice (prevent duplicate).
                if (CAS(got_candidates.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
    {
        // Tautology: keeps `iter` referenced so the compiler does not warn about an
        // unused parameter while the BP pruning that consumed it is disabled.
        assert(iter >= iter);
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Sequential counterpart of local_push_labels_para(): same label-offering logic, but it
// appends directly to the shared got_candidates_queue / once_candidated_queue (no CAS
// needed -- single-threaded caller) and keeps the bit-parallel (BP) distance pruning enabled.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) {
            // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            // NOTE(review): early return assumes remaining neighbors are also ranked above all
            // roots (rank-ordered adjacency) -- confirm against DistGraph construction.
            return;
        }
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
            VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
            const IndexType &L_tail = L[v_tail_local];
            ShortIndex &SI_v_tail = short_index[v_tail_local];
            if (SI_v_tail.indicator[label_root_id]) {
                // The label is already selected before
                continue;
            }
            // Record label_root_id as once selected by v_tail_global
            SI_v_tail.indicator[label_root_id] = 1;
//            SI_v_tail.indicator.set(label_root_id);
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                once_candidated[v_tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                // td = candidate distance via BP root i; the set intersections below may
                // shave off 1 or 2 hops when the BP neighbor sets overlap.
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                // NOTE(review): if VertexID is unsigned and td < 2, `td - 2` wraps to a huge
                // value and the test is simply false (pruning skipped) -- presumably benign,
                // but confirm bp_dist's sentinel/initial values make this intended.
                if (td - 2 <= iter) {
                    td +=
                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                                    ?
-1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } if (SI_v_tail.is_candidate[label_root_id]) { continue; } SI_v_tail.is_candidate[label_root_id] = 1; SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!got_candidates[v_tail_local]) { // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) got_candidates[v_tail_local] = 1; got_candidates_queue[end_got_candidates_queue++] = v_tail_local; } } } // { // assert(iter >= iter); // } } //// Function: pushes v_head's labels to v_head's every (master) neighbor //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // // The data structure of a message //// std::vector< LabelUnitType > buffer_recv; // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin() -> start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // // Traverse v_head's every neighbor v_tail // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // 
v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //sync_masters_2_mirrors( // const DistGraph &G, // const std::vector<VertexID> &active_queue, // VertexID end_active_queue, // std::vector< std::pair<VertexID, VertexID> > &buffer_send, // std::vector<MPI_Request> &requests_send //) //{ //// std::vector< std::pair<VertexID, VertexID> > buffer_send; // // pair.first: Owener vertex ID of the label // // pair.first: label vertex ID of the label // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = 
// (elided: remainder of the commented-out sync_masters_2_mirrors reference implementation; see VCS history)

// Gather phase (parallel): for every vertex in the slice
// [start_got_candidates_queue, start_got_candidates_queue + size_got_candidates_queue)
// of got_candidates_queue, test each of its candidate labels with distance_query() and
// insert the winners into L. Newly activated vertices and root-label updates are first
// written into per-vertex temporary arrays (one slot range per queue position, computed
// by prefix sums) and then compacted into active_queue / buffer_send, so the OpenMP
// loop needs no locking.
//
// Parameters:
//   G                           - distributed graph (global<->local id mapping)
//   roots_start, roots_size     - global id range of this batch's roots
//   short_index                 - per-vertex candidate bookkeeping (consumed and reset here)
//   dist_table                  - root -> distance buffer used by distance_query()
//   got_candidates_queue        - vertices that received candidates this iteration
//   start/size_got_candidates_queue - slice of the queue processed by this call
//   got_candidates              - membership flags for got_candidates_queue (reset here)
//   active_queue/end_active_queue   - output: vertices that inserted at least one label
//   is_active                   - membership flags for active_queue
//   buffer_send                 - output: (root_id, label_global_id) pairs to broadcast
//   iter                        - current BFS level / distance
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_inserting_para(
        const DistGraph &G,
        const VertexID roots_start,
        const VertexID roots_size,
        std::vector<ShortIndex> &short_index,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        const std::vector<VertexID> &got_candidates_queue,
        const VertexID start_got_candidates_queue,
        const VertexID size_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<uint8_t> &is_active,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send,
        const VertexID iter)
{
    const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue;
    std::vector<VertexID> offsets_tmp_active_queue;
    std::vector<VertexID> tmp_active_queue;
    std::vector<VertexID> sizes_tmp_active_queue;
    std::vector<EdgeID> offsets_tmp_buffer_send;
    std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
    std::vector<EdgeID> sizes_tmp_buffer_send;
    EdgeID total_send_labels;
    try {
        // Each queue position i_q owns exactly one slot in tmp_active_queue.
        offsets_tmp_active_queue.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) {
            offsets_tmp_active_queue[i_q] = i_q;
        }
        tmp_active_queue.resize(size_got_candidates_queue);
        sizes_tmp_active_queue.resize(size_got_candidates_queue,
                0); // Size will only be 0 or 1, but it will become offsets eventually.

        // Prepare for parallel buffer_send: reserve, per root vertex, space for all of
        // its candidates (upper bound on labels it can insert this round).
//        std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue);
        offsets_tmp_buffer_send.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) {
            VertexID v_id_local = got_candidates_queue[i_q];
            VertexID v_global_id = G.get_global_vertex_id(v_id_local);
            VertexID tmp_i_q = i_q - start_got_candidates_queue;
            if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                // If v_global_id is root, its new labels should be put into buffer_send
                offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que;
            } else {
                offsets_tmp_buffer_send[tmp_i_q] = 0;
            }
        }
        total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
        tmp_buffer_send.resize(total_send_labels);
        sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0);
    }
    catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        // NOTE(review): total_send_labels is uninitialized here if the throw happened
        // before the prefix-sum assignment above — the printed value may be garbage.
        printf("L%u_tmp_buffer_send: bad_alloc "
               "host_id: %d "
               "iter: %u "
               "size_got_candidates_queue: %u "
               "total_send_labels: %lu "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               __LINE__,
               host_id,
               iter,
               size_got_candidates_queue,
               total_send_labels,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }

#pragma omp parallel for
    for (VertexID i_queue = start_got_candidates_queue;
            i_queue < bound_got_candidates_queue; ++i_queue) {
        VertexID v_id_local = got_candidates_queue[i_queue];
        VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
        got_candidates[v_id_local] = 0; // reset got_candidates
        // Traverse v_id's all candidates
        VertexID tmp_i_queue = i_queue - start_got_candidates_queue;
        VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
        for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
            VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
            short_index[v_id_local].is_candidate[cand_root_id] = 0;
            // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
            if (distance_query(
                    cand_root_id,
                    v_id_local,
                    roots_start,
//                    L,
                    dist_table,
                    iter)) {
                if (!is_active[v_id_local]) {
                    is_active[v_id_local] = 1;
//                    active_queue[end_active_queue++] = v_id_local;
                    tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local;
                }
                ++inserted_count;
                // The candidate cand_root_id needs to be added into v_id's label
                insert_label_only_para(
                        cand_root_id,
                        v_id_local,
                        roots_start,
                        roots_size,
                        G,
                        tmp_buffer_send,
                        sizes_tmp_buffer_send[tmp_i_queue],
                        offsets_tmp_buffer_send[tmp_i_queue]);
//                        buffer_send);
            }
        }
        short_index[v_id_local].end_candidates_que = 0;
        if (0 != inserted_count) {
            // Update other arrays in L[v_id] if new labels were inserted in this iteration
            update_label_indices(
                    v_id_local,
                    inserted_count,
//                    L,
                    short_index,
//                    b_id,
                    iter);
        }
    }

    {// Collect elements from tmp_active_queue to active_queue
        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
        PADO::collect_into_queue(
                tmp_active_queue,
                offsets_tmp_active_queue,
                sizes_tmp_active_queue,
                total_new,
                active_queue,
                end_active_queue);
    }
    {// Collect elements from tmp_buffer_send to buffer_send (appended after its current contents)
        EdgeID old_size_buffer_send = buffer_send.size();
        EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
        try {
            buffer_send.resize(total_new + old_size_buffer_send);
        }
        catch (const std::bad_alloc &) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("L%u_buffer_send: bad_alloc "
                   "iter: %u "
                   "host_id: %d "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   __LINE__,
                   iter,
                   host_id,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
//        EdgeID zero_size = 0;
        PADO::collect_into_queue(
                tmp_buffer_send,
                offsets_tmp_buffer_send,
                sizes_tmp_buffer_send,
                total_new,
                buffer_send,
                old_size_buffer_send);
//                zero_size);
    }
}

// Function for distance query;
// traverse vertex v_id's labels;
// return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Traverse v_id's all existing labels
    VertexID b_i_bound = Lv.batches.size();
    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
        VertexID dist_start_index = Lv.batches[b_i].start_index;
        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
        // Traverse dist_table
        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//    VertexID dist_bound_index = Lv.distances.size();
//    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
            UnweightedDist dist = Lv.distances[dist_i].dist;
            // Cannot use this, because no batch_id any more, so distances are not all in order among batches.
            if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
                // If the half path distance is already greater than their targeted distance, jump to next batch
                break;
            }
            VertexID v_start_index = Lv.distances[dist_i].start_index;
            VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
//            _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
            _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
//                VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
                VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id
                if (v >= cand_real_id) {
                    // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                    // in which case dist_table[cand_root_id][v] does not exist.
                    continue;
                }
                // Two-hop distance through hub v: label(v_id, v) + dist_table(cand_root_id, v).
                VertexID d_tmp = dist + dist_table[cand_root_id][v];
                if (d_tmp <= iter) {
                    // An equal-or-shorter path already exists; the candidate label is redundant.
                    return false;
                }
            }
        }
    }
    return true;
}

//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// If v_id itself is a root of this batch, the (root_id, label_id) pair is queued in
// buffer_send so other hosts can mirror the update into their dist_table.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
//        UnweightedDist iter)
{
    try {
        VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
//        L[v_id_local].vertices.push_back(cand_root_id);
        // Update the distance buffer if v_id is a root
        VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        VertexID v_root_id = v_id_global - roots_start;
        if (v_id_global >= roots_start && v_root_id < roots_size) {
//            VertexID cand_real_id = cand_root_id + roots_start;
//            dist_table[v_root_id][cand_real_id] = iter;
            // Put the update into the buffer_send for later sending
            buffer_send.emplace_back(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory while growing the label index: report and abort the whole run.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_seq: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}

//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// Unlike the sequential version, a root's (root_id, label_id) pair is written into the
// caller-reserved slot range [offset_tmp_buffer_send, ...) of tmp_buffer_send, with
// size_tmp_buffer_send (a per-vertex counter owned by one OpenMP thread) advanced in place.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    try {
        VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
//        L[v_id_local].vertices.push_back(cand_root_id);
        // Update the distance buffer if v_id is a root
        VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        VertexID v_root_id = v_id_global - roots_start;
        if (v_id_global >= roots_start && v_root_id < roots_size) {
//            VertexID cand_real_id = cand_root_id + roots_start;
            // Put the update into the buffer_send for later sending
            tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] =
                    std::make_pair(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_para: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}

// Function updates those index arrays in v_id's label only if v_id has been inserted
new labels template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: update_label_indices( const VertexID v_id_local, const VertexID inserted_count, // std::vector<IndexType> &L, std::vector<ShortIndex> &short_index, // VertexID b_id, const UnweightedDist iter) { try { IndexType &Lv = L[v_id_local]; // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch if (short_index[v_id_local].indicator[BATCH_SIZE]) { // Increase the batches' last element's size because a new distance element need to be added ++(Lv.batches.rbegin() -> size); } else { short_index[v_id_local].indicator[BATCH_SIZE] = 1; // short_index[v_id_local].indicator.set(BATCH_SIZE); // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added Lv.batches.emplace_back( // b_id, // batch id Lv.distances.size(), // start index 1); // size } // Insert a new distance element with start_index, size, and dist Lv.distances.emplace_back( Lv.vertices.size() - inserted_count, // start index inserted_count, // size iter); // distance } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("update_label_indices: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Function to reset dist_table the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. 
// End-of-batch cleanup: undoes only the dist_table entries that were actually written
// this batch (recorded in recved_dist_table), instead of wiping the whole table.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        const DistGraph &G,
//        VertexID roots_start,
//        const std::vector<VertexID> &roots_master_local,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        const std::vector<VertexID> &once_candidated_queue,
        const VertexID end_once_candidated_queue)
{
    // (elided: commented-out variant that reset dist_table by walking local masters'
    //  own label index; see VCS history)

    // Reset dist_table according to received masters' labels from other hosts
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }

    // (elided: commented-out memset reset of the bit-parallel labels table)
    // NOTE(review): bp_labels_table is currently only referenced by that commented-out
    // code, so the parameter is effectively unused here — confirm before removing.

    // Remove labels of local minimum set
    // NOTE(review): the actual removal (clean_all_indices()) is commented out below,
    // so this loop currently has no effect — confirm whether that is intentional.
    for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
        VertexID v_local_id = once_candidated_queue[v_i];
        if (!G.is_local_minimum[v_local_id]) {
            continue;
        }
//        L[v_local_id].clean_all_indices();
    }
}

// One batch of the distributed labeling: initializes the batch's roots, then runs
// level-synchronous iterations (push labels -> check/insert candidates -> MPI sync)
// until no host has active vertices, and finally resets the per-batch buffers.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
//        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
    // The Maximum of active vertices among hosts.
    VertexID global_num_actives = initialization(G,
            short_index,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            active_queue,
            end_active_queue,
            once_candidated_queue,
            end_once_candidated_queue,
            once_candidated,
//            b_id,
            roots_start,
            roots_size,
//            roots_master_local,
            used_bp_roots);
//    initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration

    {//test
        if (0 == host_id) {
            printf("host_id: %u "
                   "b_i: %u "
                   "initialization finished.\n",
                   host_id,
                   roots_start / BATCH_SIZE);
        }
    }

    while (global_num_actives) {
        ++iter;
        {// Limit the distance
            // Deliberate cap: once iter exceeds 2 the remaining active vertices are
            // deactivated and the batch stops, so only labels of distance <= 2 are built.
            if (iter > 2 ) {
                if (end_active_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                    for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
                        VertexID v_id_local = active_queue[i_q];
                        is_active[v_id_local] = 0;
                    }
                } else {
                    for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
                        VertexID v_id_local = active_queue[i_q];
                        is_active[v_id_local] = 0;
                    }
                }
                end_active_queue = 0;
                break;
            }
        }
        // (elided: commented-out debug memory-usage printout)

        // Traverse active vertices to push their labels as candidates
        // Send masters' newly added labels to other hosts
        try {
//            scatter_time -= WallTimer::get_time_mark();
            // (elided: commented-out chunked "multiple pushing" variant that split the
            //  active queue into 1<<20-sized chunks to cap peak memory; see VCS history)
//// Single pushing
            schedule_label_pushing_para(
                    G,
                    roots_start,
                    used_bp_roots,
                    active_queue,
                    0,
                    global_num_actives,
                    end_active_queue,
                    got_candidates_queue,
                    end_got_candidates_queue,
                    short_index,
                    bp_labels_table,
                    got_candidates,
                    is_active,
                    once_candidated_queue,
                    end_once_candidated_queue,
                    once_candidated,
                    iter);
            end_active_queue = 0;
//            scatter_time += WallTimer::get_time_mark();
        }
        catch (const std::bad_alloc &) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("pushing: bad_alloc "
                   "iter: %u "
                   "host_id: %d "
                   "global_num_actives: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   iter,
                   host_id,
                   global_num_actives,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
        {//test
            if (0 == host_id) {
                printf("host_id: %u pushing finished...\n", host_id);
            }
        }

        // Traverse vertices in the got_candidates_queue to insert labels
        {
//            gather_time -= WallTimer::get_time_mark();
            std::vector< std::pair<VertexID, VertexID> > buffer_send;
            // For sync elements in the dist_table
            // pair.first: root id
            // pair.second: label (global) id of the root
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // (elided: commented-out chunked "multiple checking/inserting" variant)
//// Single checking/inserting
                schedule_label_inserting_para(
                        G,
                        roots_start,
                        roots_size,
                        short_index,
                        dist_table,
                        got_candidates_queue,
                        0,
                        end_got_candidates_queue,
                        got_candidates,
                        active_queue,
                        end_active_queue,
                        is_active,
                        buffer_send,
                        iter);
                // (elided: commented-out inline backup of the parallel
                //  checking/inserting implementation; see VCS history)
            } else {
                // Sequential path for small queues: same logic as
                // schedule_label_inserting_para without the temporary-array machinery.
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
//                                    dist_table,
                                    buffer_send);
//                                    iter);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
                                short_index,
//                                b_id,
                                iter);
                    }
                }
            }
//            {//test
//                printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//            }
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty

            // Sync the dist_table: every host broadcasts its roots' new labels; each
            // receiver applies them and remembers them in recved_dist_table so
            // reset_at_end() can undo exactly these writes.
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
                one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                try {
                    if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                        // Get label number for every root
                        std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            // Atomic: multiple threads may count labels of the same root.
                            __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                        }
                        // Resize the recved_dist_table for every root
#pragma omp parallel for
                        for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                            VertexID old_size = recved_dist_table[root_id].size();
                            VertexID tmp_size = sizes_recved_root_labels[root_id];
                            if (tmp_size) {
                                recved_dist_table[root_id].resize(old_size + tmp_size);
                                sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                            }
                            // If tmp_size == 0, root_id has no received labels.
//                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                        }
                        // Recorde received labels in recved_dist_table
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id);
                        }
                    } else {
                        for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            // Record the received element, for future reset
                            recved_dist_table[root_id].push_back(cand_real_id);
                        }
                    }
                }
                catch (const std::bad_alloc &) {
                    double memtotal = 0;
                    double memfree = 0;
                    PADO::Utils::system_memory(memtotal, memfree);
                    printf("recved_dist_table: bad_alloc "
                           "host_id: %d "
                           "iter: %u "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                           host_id,
                           iter,
                           get_index_size() * 1.0 / (1 << 30),
                           memtotal / 1024,
                           memfree / 1024);
                    exit(1);
                }
            }
            // Sync the global_num_actives
//            message_time -= WallTimer::get_time_mark();
            MPI_Allreduce(&end_active_queue,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_MAX,
//                    MPI_SUM,
                    MPI_COMM_WORLD);
//            message_time += WallTimer::get_time_mark();
//            gather_time += WallTimer::get_time_mark();
        }
        {//test
//            if (0 == host_id) {
                printf("host_id: %u "
                       "iter: %u inserting labels finished.\n",
                       host_id,
                       iter);
//            }
        }
    }

    // Reset the dist_table
//    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            once_candidated_queue,
            end_once_candidated_queue);
//    clearup_time += WallTimer::get_time_mark();
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u resetting finished.\n", host_id);
//        }
//    }
}

//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process( // const DistGraph &G, // VertexID b_id, // VertexID roots_start, // start id of roots // VertexID roots_size, // how many roots in the batch // const std::vector<uint8_t> &used_bp_roots, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<uint8_t> &got_candidates, //// std::vector<bool> &got_candidates, // std::vector<uint8_t> &is_active, //// std::vector<bool> &is_active, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated) //// std::vector<bool> &once_candidated) //{ // // At the beginning of a batch, initialize the labels L and distance buffer dist_table; // initializing_time -= WallTimer::get_time_mark(); // VertexID global_num_actives = initialization(G, // short_index, // dist_table, // recved_dist_table, // bp_labels_table, // active_queue, // end_active_queue, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // b_id, // roots_start, // roots_size, //// roots_master_local, // used_bp_roots); // initializing_time += WallTimer::get_time_mark(); // UnweightedDist iter = 0; // The iterator, also the distance for current iteration //// {//test //// printf("host_id: %u initialization finished.\n", host_id); //// } // // // while (global_num_actives) { ////#ifdef DEBUG_MESSAGES_ON //// {// //// if (0 == host_id) { //// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives); //// } //// } ////#endif // ++iter; // // Traverse active vertices to push their labels as candidates // // Send masters' newly added labels to other hosts // { // scatter_time -= WallTimer::get_time_mark(); // 
std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue); // //.first: Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if (indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // 
iter); // } // start_index = bound_index; // } // } // scatter_time += WallTimer::get_time_mark(); // } // // // Traverse vertices in the got_candidates_queue to insert labels // { // gather_time -= WallTimer::get_time_mark(); // std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // // pair.first: root id // // pair.second: label (global) id of the root // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter) ) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, //// dist_table, // buffer_send); //// iter); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, // short_index, // b_id, // iter); // } // } //// {//test //// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); 
//// } // end_got_candidates_queue = 0; // Set the got_candidates_queue empty // // Sync the dist_table // for (int root = 0; root < num_hosts; ++root) { // std::vector<std::pair<VertexID, VertexID>> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, VertexID> &e : buffer_recv) { // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // dist_table[root_id][cand_real_id] = iter; // // Record the received element, for future reset // recved_dist_table[root_id].push_back(cand_real_id); // } // } // // // Sync the global_num_actives // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); // } // } // // // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); // reset_at_end( //// G, //// roots_start, //// roots_master_local, // dist_table, // recved_dist_table, // bp_labels_table); // clearup_time += WallTimer::get_time_mark(); //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Every host h_i broadcast to others // for (int root = 0; root < num_hosts; ++root) { // std::vector<E_T> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } //// uint64_t size_buffer_send = buffer_send.size(); //// // Sync the size_buffer_send. 
//// message_time -= WallTimer::get_time_mark(); //// MPI_Bcast(&size_buffer_send, //// 1, //// MPI_UINT64_T, //// root, //// MPI_COMM_WORLD); //// message_time += WallTimer::get_time_mark(); ////// {// test ////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); ////// } //// if (!size_buffer_send) { //// continue; //// } //// message_time -= WallTimer::get_time_mark(); //// std::vector<E_T> buffer_recv(size_buffer_send); //// if (host_id == root) { //// buffer_recv.assign(buffer_send.begin(), buffer_send.end()); //// } //// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; //// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) { //// // Only need 1 broadcast //// //// MPI_Bcast(buffer_recv.data(), //// bytes_buffer_send, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// } else { //// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; //// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; //// size_t offset = 0; //// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { ////// size_t offset = b_i * unit_buffer_size; //// size_t size_unit_buffer = b_i == num_unit_buffers - 1 //// ? size_buffer_send - offset //// : unit_buffer_size; //// MPI_Bcast(buffer_recv.data() + offset, //// size_unit_buffer * ETypeSize, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// offset += unit_buffer_size; //// } //// } //// message_time += WallTimer::get_time_mark(); // for (const E_T &e : buffer_recv) { // fun(e); // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. 
// for (const E_T &e : buffer_send) { // fun(e); // } // // // Every host sends to others // for (int src = 0; src < num_hosts; ++src) { // if (host_id == src) { // // Send from src // message_time -= WallTimer::get_time_mark(); // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, host_id); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // } // message_time += WallTimer::get_time_mark(); // } else { // // Receive from src // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, src); // if (host_id == dst) { // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // Every host sends (num_hosts - 1) times // for (int hop = 1; hop < num_hosts; ++hop) { // int src = hop_2_me_host_id(-hop); // int dst = hop_2_me_host_id(hop); // if (src != dst) { // Normal case // // When host_id is odd, first receive, then send. // if (static_cast<uint32_t>(host_id) & 1U) { // message_time -= WallTimer::get_time_mark(); // // Receive first. 
// std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // // Send then. // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // When host_id is even, first send, then receive. // // Send first. // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // // Receive then. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } else { // If host_id is higher than dst, first send, then receive // // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2. 
// if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. 
//    message_time -= WallTimer::get_time_mark();
//    MPI_Bcast(&size_buffer_send,
//            1,
//            MPI_UINT64_T,
//            h_i,
//            MPI_COMM_WORLD);
//    message_time += WallTimer::get_time_mark();
////    {// test
////        printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////    }
//    if (!size_buffer_send) {
//        continue;
//    }
//    uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
//    // Broadcast the buffer_send
//    for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
//        // Prepare the unit buffer
//        message_time -= WallTimer::get_time_mark();
//        size_t offset = b_i * UNIT_BUFFER_SIZE;
//        size_t size_unit_buffer = b_i == num_unit_buffers - 1
//                ? size_buffer_send - offset
//                : UNIT_BUFFER_SIZE;
//        std::vector<E_T> unit_buffer(size_unit_buffer);
//        // Copy the messages from buffer_send to unit buffer.
//        if (host_id == h_i) {
//            unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
//        }
//        // Broadcast the unit buffer
//        MPI_Bcast(unit_buffer.data(),
//                MPI_Instance::get_sending_size(unit_buffer),
//                MPI_CHAR,
//                h_i,
//                MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
//        // Process every element of unit_buffer
//        for (const E_T &e : unit_buffer) {
//            fun(e);
//        }
//    }
//    }
//}

// Function: Host root broadcasts its sending buffer to a receiving buffer.
//
// Collective call: every host in MPI_COMM_WORLD must invoke this with the
// same `root`. On exit, buffer_recv on every host holds the contents that
// the root had in buffer_send.
//
// Side effect on the root: buffer_send is *swapped* into buffer_recv (see
// below), so the root's buffer_send comes back empty. Callers that reuse
// buffer_send across roots must account for this.
//
// Large payloads: MPI_Bcast takes an `int` element count, so a payload
// larger than INT_MAX bytes is split into multiple chunked broadcasts.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    // Only the root knows the real size; all hosts learn it via the
    // MPI_Bcast below. (volatile pairs with the cast-away in that call.)
    volatile uint64_t size_buffer_send = 0;
    if (host_id == root) {
        size_buffer_send = buffer_send.size();
    }
    // Sync the size_buffer_send.
//    message_time -= WallTimer::get_time_mark();
//    {//test
//        if (6 == root && 6 == host_id) {
////        if (0 == root && size_buffer_send == 16 && 0 == host_id) {
//            printf("before: host_id: %d size_buffer_send: %lu\n",
//                    host_id,
//                    size_buffer_send);
//        }
//    }
    MPI_Bcast((void *) &size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
//    message_time += WallTimer::get_time_mark();
//    {//test
////        if (0 == root && size_buffer_send == 16 && 0 == host_id) {
//        if (6 == root && 6 == host_id) {
////        if (0 == root && size_buffer_send == 16 && 0 == host_id) {
//            printf("after: host_id: %d size_buffer_send: %lu\n",
//                    host_id,
//                    size_buffer_send);
//        }
//    }
    // Make room for the incoming payload; a failed resize is fatal, so
    // report memory state and abort rather than continue inconsistently.
    try {
        buffer_recv.resize(size_buffer_send);
    }
    catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        // NOTE(review): `caller_line` is not declared in this function --
        // presumably a class member recording the call site; confirm.
        printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
               "host_id: %d "
               "root: %d "
               "caller_line: %lu "
               "size_buffer_send: %lu "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               root,
               caller_line,
               size_buffer_send,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Nothing to broadcast: every host returns with an empty buffer_recv.
    if (!size_buffer_send) {
        return;
    }

    // Broadcast the buffer_send
//    message_time -= WallTimer::get_time_mark();
    if (host_id == root) {
//        buffer_recv.assign(buffer_send.begin(), buffer_send.end());
        // Swap instead of copy: O(1), but empties the root's buffer_send.
        buffer_recv.swap(buffer_send);
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Only need 1 broadcast
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // Payload exceeds MPI's int count limit: split into near-equal
        // chunks, each below INT_MAX bytes, and broadcast sequentially.
        const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
        const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
        size_t offset = 0;
        for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
            // Last chunk carries whatever remains.
            size_t size_unit_buffer = b_i == num_unit_buffers - 1
                    ? size_buffer_send - offset
                    : unit_buffer_size;
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += unit_buffer_size;
        }
    }
//    message_time += WallTimer::get_time_mark();
}

} // presumably closes the enclosing namespace opened earlier in this header
#endif //PADO_DPADO_H
symv_x_dia_n_hi_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Symmetric matrix-vector product for DIA (diagonal) storage, upper
 * triangle ("hi") stored, with the conjugate applied to matrix values:
 *     y := alpha * op(A) * x + beta * y
 * Only compiled for complex types (COMPLEX); for real types conjugation
 * is not meaningful and ALPHA_SPARSE_STATUS_INVALID_VALUE is returned.
 *
 * Parallel strategy: diagonals are distributed across threads and each
 * thread accumulates into its own private length-m buffer tmp[threadId],
 * avoiding atomics; the private buffers are reduced into y afterwards.
 */
static alphasparse_status_t
ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
#ifdef COMPLEX
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* A symmetric operand must be square. */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    /* One zero-initialized private accumulator of length m per thread.
     * NOTE(review): malloc results are not checked for NULL -- confirm
     * the project's convention for allocation failure handling. */
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    for(int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }
    const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        if(dis == 0)
        {
            /* Main diagonal: contributes once per row.
             * alpha_mul_3c presumably computes v = alpha * conj(a) --
             * confirm against the macro's definition. */
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < m; ++j)
            {
                ALPHA_Number v;
                alpha_mul_3c(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][j], v, x[j]);
            }
        }
        else if(dis > 0)
        {
            /* Super-diagonal: element (j, j+dis) of the stored upper
             * triangle contributes to both y[j] (direct) and y[j+dis]
             * (mirrored), since the lower triangle is not stored. */
            const ALPHA_INT row_start = 0;
            const ALPHA_INT col_start = dis;
            const ALPHA_INT nnz = m - dis;
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Number v;
                alpha_mul_3c(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
                alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
            }
        }
        /* dis < 0 (sub-diagonals) is deliberately skipped -- presumably
         * the "hi" variant trusts only the stored upper triangle. */
    }
    /* Reduction: y[i] = beta*y[i] + sum over threads of tmp[t][i].
     * Parallel over rows, so each y[i] is touched by exactly one thread. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }
    /* Release the per-thread accumulators (frees are independent). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}

/* Public entry point: dispatches to the OpenMP implementation for complex
 * builds; real builds report the operation as invalid. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
#ifdef COMPLEX
    return ONAME_omp(alpha, A, x, beta, y);
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
GB_unop__ainv_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this file is machine-generated; behavioral changes belong in
// the Generator/ templates, not here. Comments below are annotation only.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_uint64_uint64)
// op(A') function:  GB (_unop_tran__ainv_uint64_uint64)

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = -aij  (additive inverse; wraps modulo 2^64 for uint64_t)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ; \
    Cx [pC] = -z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over a dense value array; entries are independent, so
// the loop is statically scheduled across nthreads with no synchronization.
GrB_Info GB (_unop_apply__ainv_uint64_uint64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every slot in Ax holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots whose bitmap bit says the entry is absent
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c and is specialized
// here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__ainv_uint64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ijk_optimize.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Matrix dimensions, parsed from the command line in main(). */
int A_row;
int A_col;
int B_row;
int B_col;

/* Allocate a row x col matrix as an array of row pointers.
 * Exits with a diagnostic if any allocation fails (previously unchecked). */
int **constructMatrix(int row, int col){
    int **matrix = (int **)malloc(sizeof(int *) * row);
    if (matrix == NULL){
        fprintf(stderr, "error: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < row; i++){
        matrix[i] = (int *)malloc(sizeof(int) * col);
        if (matrix[i] == NULL){
            fprintf(stderr, "error: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    return matrix;
}

/* Free a matrix allocated by constructMatrix.
 * col is unused but kept so the existing call sites stay valid. */
void freeMatrix(int **matrix, int row, int col){
    (void)col;
    for (int i = 0; i < row; i++){
        free(matrix[i]);
    }
    free(matrix);
}

/* Read row*col integers from input into M.
 * Exits if the file is truncated or contains non-numeric data
 * (previously fscanf's return value was ignored, leaving M partly
 * uninitialized on a short file). */
static void readMatrix(FILE *input, int **M, int row, int col){
    for (int i = 0; i < row; i++){
        for (int j = 0; j < col; j++){
            if (fscanf(input, "%d", &M[i][j]) != 1){
                fprintf(stderr, "error: malformed or truncated matrix file\n");
                exit(EXIT_FAILURE);
            }
        }
    }
}

/* Multiply two matrices read from the file "matrix" and write the product
 * to "ijk_optimize_result", timing the OpenMP-parallel multiply.
 * Usage: ijk_optimize A_row A_col B_row B_col num_threads */
int main(int argc, char *argv[]){
    /* Previously argv was dereferenced without checking argc. */
    if (argc < 6){
        fprintf(stderr, "usage: %s A_row A_col B_row B_col num_threads\n", argv[0]);
        return EXIT_FAILURE;
    }
    A_row = atoi(argv[1]);
    A_col = atoi(argv[2]);
    B_row = atoi(argv[3]);
    B_col = atoi(argv[4]);
    int number_of_threads = atoi(argv[5]);
    if (A_row <= 0 || A_col <= 0 || B_row <= 0 || B_col <= 0 || number_of_threads <= 0){
        fprintf(stderr, "error: dimensions and thread count must be positive\n");
        return EXIT_FAILURE;
    }
    /* The multiply below indexes B[k] for k < A_col; without this check a
     * mismatch caused out-of-bounds reads. */
    if (A_col != B_row){
        fprintf(stderr, "error: A_col (%d) must equal B_row (%d)\n", A_col, B_row);
        return EXIT_FAILURE;
    }

    FILE *input = fopen("matrix", "r");
    if (input == NULL){
        perror("matrix");
        return EXIT_FAILURE;
    }
    int **A = constructMatrix(A_row, A_col);
    int **B = constructMatrix(B_row, B_col);
    int **C = constructMatrix(A_row, B_col);
    readMatrix(input, A, A_row, A_col);   /* read A */
    readMatrix(input, B, B_row, B_col);   /* read B */
    fclose(input);

    double start_time = omp_get_wtime();
    /* Multiply C = A * B. Rows of C are distributed across threads;
     * i/j/k/sum are thread-private, the matrices are shared (A and B are
     * read-only, and each thread writes disjoint rows of C, so no
     * synchronization is required). */
    int i, j, k;
    int sum;
#pragma omp parallel for shared(A,B,C) private(i,j,k,sum) num_threads(number_of_threads)
    for (i = 0; i < A_row; i++){
        for (j = 0; j < B_col; j++){
            sum = 0;
            for (k = 0; k < A_col; k++){
                sum += A[i][k] * B[k][j];
            }
            C[i][j] = sum;
        }
    }
    double end_time = omp_get_wtime();
    printf("%s: %g sec.\n", "ijk_optimize_runtime", end_time - start_time);

    /* Output the result to compare with the golden result. */
    FILE *out = fopen("ijk_optimize_result", "w");
    if (out == NULL){
        perror("ijk_optimize_result");
        return EXIT_FAILURE;
    }
    for (int r = 0; r < A_row; r++){
        for (int c = 0; c < B_col; c++){
            fprintf(out, "%d ", C[r][c]);
        }
        fprintf(out, "\n");
    }
    fprintf(out, "\n");
    fclose(out);

    freeMatrix(A, A_row, A_col);
    freeMatrix(B, B_row, B_col);
    freeMatrix(C, A_row, B_col);
    return 0;
}
GB_unaryop__lnot_int8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): machine-generated file; behavioral changes belong in the
// generator templates. Comments below are annotation only.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int8_int32
// op(A') function:  GB_tran__lnot_int8_int32

// C type:   int8_t
// A type:   int32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = !(aij != 0)   (logical NOT: nonzero -> 0, zero -> 1)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: entries are independent, so the loop is statically
// scheduled across nthreads with no synchronization.
GrB_Info GB_unop__lnot_int8_int32
(
    int8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose kernel body lives in GB_unaryop_transpose.c, specialized via
// the GB_* macros defined above.
GrB_Info GB_tran__lnot_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Instantiates the generic unary-op template for LNOT with uint64 input and
// int16 output; only the macros below differ between instantiations.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int16_uint64
// op(A') function: GB_tran__lnot_int16_uint64

// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
 uint64_t

#define GB_CTYPE \
 int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
 uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
 z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
 int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
 /* aij = Ax [pA] */ \
 GB_GETA (aij, Ax, pA) ; \
 /* Cx [pC] = op (cast (aij)) */ \
 GB_CASTING (z, aij) ; \
 GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
 (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of Ax; iterations are independent,
// so the loop is statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_int16_uint64
(
    int16_t *Cx, // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c and is specialized
// via the macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__lnot_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kernel_cpu_2.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } // #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <omp.h> // (in directory known to compiler) #include <stdlib.h> // (in directory known to compiler) #include <assert.h> #include <stdio.h> //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include "common.h" // (in directory provided here) 
//======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "timer.h" // (in directory provided here) needed by timer //======================================================================================================================================================150 // HEADER //======================================================================================================================================================150 #include "./kernel_cpu_2.h" // (in directory provided here) //========================================================================================================================================================================================================200 // PLASMAKERNEL_GPU //========================================================================================================================================================================================================200 void kernel_cpu_2( int cores_arg, knode *knodes, long knodes_elem, int order, long maxheight, int count, long *currKnode, long *offset, long *lastKnode, long *offset_2, int *start, int *end, int *recstart, int *reclength) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // timer long long time0; long long time1; long long time2; // common variables int i; time0 = get_time(); 
//======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int threadsPerBlock; threadsPerBlock = order < 1024 ? order : 1024; { time1 = get_time(); //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; // process number of querries { const unsigned long long parallel_for_start = current_time_ns(); #pragma omp parallel for private (i, thid) for(bid = 0; bid < count; bid++){ // process levels of the tree for(i = 0; i < maxheight; i++){ // process all leaves at each level for(thid = 0; thid < threadsPerBlock; thid++){ if((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodes[currKnode[bid]].indices[thid] < knodes_elem){ offset[bid] = knodes[currKnode[bid]].indices[thid]; } } if((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset_2[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, 
values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodes[lastKnode[bid]].indices[thid] < knodes_elem){ offset_2[bid] = knodes[lastKnode[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; lastKnode[bid] = offset_2[bid]; } // process leaves for(thid = 0; thid < threadsPerBlock; thid++){ // Find the index of the starting record if(knodes[currKnode[bid]].keys[thid] == start[bid]){ recstart[bid] = knodes[currKnode[bid]].indices[thid]; } } // process leaves for(thid = 0; thid < threadsPerBlock; thid++){ // Find the index of the ending record if(knodes[lastKnode[bid]].keys[thid] == end[bid]){ reclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1; } } } ; const unsigned long long parallel_for_end = current_time_ns(); printf("pragma93_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); } time2 = get_time(); } //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of CPU/MCPU KERNEL:\n"); printf("%15.12f s, %15.12f % : MCPU: SET DEVICE\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100); printf("%15.12f s, %15.12f % : CPU/MCPU: KERNEL\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time2-time0) / 1000000); } // main //========================================================================================================================================================================================================200 // END 
//========================================================================================================================================================================================================200 // #ifdef __cplusplus // } // #endif
chemio.c
////////////////////////////////////////////////////////////
// chemio: readers/writers for protein-ligand data: inference dumps, PDB and
// SDF molecule files, voxel grids, and graph/matrix exports.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include <float.h>
#include <omp.h>
#include <limits.h>

#include "types.h"
#include "util.h"
#include "vector.h"
#include "voxelizer.h"
#include "chemio.h"

////////////////////////////////////////////////////////////
// Input
////////////////////////////////////////////////////////////

// Read a CSV-ish inference file ("x, y, z, bind" per line) into
// state->inference (array of vector4_t; w holds the prediction value).
// Exits via Error() on any open/parse/alloc failure.
void read_inference(state_t *state, char *inf)
{
  char buf[1024];
  FILE *f;
  int l,x,y,z;
  double bind,xd,yd,zd;

  // Open input file.
  if( !(f=fopen(inf,"r")) ) {
    Error("read_inference(): Failed to open input file.\n");
  }

  // Read a line at a time
  for(l=0; (fgets(buf,sizeof(buf),f) != NULL); l++) {
    // Clip off the new line and any carriage return.
    // NOTE(review): an empty line would make strlen()==0 and index buf[-1];
    // input presumably never contains blank lines -- confirm.
    buf[strlen(buf)-1] = '\0';
    if( buf[strlen(buf)-1] == '\r' ) {
      buf[strlen(buf)-1] = '\0';
    }
    // Parse the line as a simple 4-tuple (x, y, z, bind).
    if( sscanf(buf,"%lf, %lf, %lf, %lf\n",&xd,&yd,&zd,&bind) != 4 ) {
      Error("read_inference(): Failed to parse line %d!\n",l);
    }
    // Coordinates are truncated to integer voxel indices.
    x = xd;
    y = yd;
    z = zd;
    // Enlarge inference list.
    state->ninference++;
    if( !(state->inference = realloc(state->inference,state->ninference*sizeof(vector4_t))) ) {
      Error("read_inference(): Failed to enlarge inference array (%d)!\n",state->ninference);
    }
    // Save the x,y,z coord of the prediction.
    state->inference[state->ninference-1].s.x = x;
    state->inference[state->ninference-1].s.y = y;
    state->inference[state->ninference-1].s.z = z;
    // Save the prediction in the inference array.
    state->inference[state->ninference-1].s.w = bind;
  }

  // Close inference data file.
  fclose(f);
}

////////////////////////////////////////////////////////////

// Parse a PDB file: ATOM records -> state->atoms (protein), HETATM records
// -> state->ligand, CONECT records -> state->bonds (ligand bonds only).
// Only the first model is read (parsing stops at ENDMDL).  Fixed PDB column
// offsets are used throughout (id @6..10, x/y/z @30/38/46 width 8,
// element @76..77, charge @78..79).
void read_pdb(state_t *state, char *pdb)
{
  char buf[1024],tmp[128];
  FILE *f;
  int i,j,l,a,t;

  // Open input PDB file.
  if( !(f=fopen(pdb,"r")) ) {
    Error("read_pdb(): Failed to open input PDB file.\n");
  }

  // Save a clean version of file name: strip directory prefix and extension.
  // NOTE(review): if no '/' ('.') is present the scan leaves i == -1 and the
  // following test reads pfn[-1]; paths presumably always contain both -- confirm.
  state->pfn = strdup(pdb);
  for(i=strlen(state->pfn)-1; (i >= 0) && (state->pfn[i] != '/'); i--);
  if( state->pfn[i] == '/' ) {
    i++;
  }
  state->pfn = &(state->pfn[i]);
  for(i=strlen(state->pfn)-1; (i >= 0) && (state->pfn[i] != '.'); i--);
  if( state->pfn[i] == '.' ) {
    state->pfn[i] = '\0';
  }

  // Consider the file a line at a time.
  for(l=0; (fgets(buf,sizeof(buf),f) != NULL); l++) {
    // Strip trailing newline / carriage return.
    buf[strlen(buf)-1] = '\0';
    if( buf[strlen(buf)-1] == '\r' ) {
      buf[strlen(buf)-1] = '\0';
    }
    // NOTE(review): strlen("ENDMLD") is a typo for "ENDMDL" but both are
    // 6 chars, so the comparison is still correct.
    if( strncmp(buf,"ENDMDL",strlen("ENDMLD")) == 0 ) {
      // For now, just stop parsing after the first "model" is read in.
      break;
    } else if( strncmp(buf,"ATOM",strlen("ATOM")) == 0 ) {
      //
      // Parse "ATOM" lines (protein).
      //
      // ATOM  10143  N   GLY A 831      39.912  17.726  32.558  1.00-0.730           N
      // ATOM  10145  C   GLY A 831      42.161  18.193  33.468  1.00 0.449           C
      if( strlen(buf) != 80 ) {
        Error("read_pdb(): ATOM line not 80 cols (%d) (l %d).\n",strlen(buf),l);
      }
      // Grow the protein atom array by one and zero the new slot.
      state->natoms++;
      if( !(state->atoms=realloc(state->atoms,state->natoms*sizeof(atom_t))) ) {
        Error("read_pdb(): Grow of atoms array failed.\n");
      }
      memset(&(state->atoms[state->natoms-1]),0,sizeof(atom_t));
      // ID.
      memcpy(tmp,buf+6,5);
      tmp[5] = '\0';
      if( sscanf(tmp,"%d",&(state->atoms[state->natoms-1].id)) != 1 ) {
        Error("read_pdb(): Parse ATOM ID failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Atom type: second char of the right-justified element field.
      memcpy(tmp,buf+76,2);
      tmp[2] = '\0';
      state->atoms[state->natoms-1].type = tmp[1];
      // X coord.
      memcpy(tmp,buf+30,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->atoms[state->natoms-1].pos.s.x)) != 1 ) {
        Error("read_pdb(): Parse ATOM x-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Y coord.
      memcpy(tmp,buf+38,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->atoms[state->natoms-1].pos.s.y)) != 1 ) {
        Error("read_pdb(): Parse ATOM y-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Z coord.
      memcpy(tmp,buf+46,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->atoms[state->natoms-1].pos.s.z)) != 1 ) {
        Error("read_pdb(): Parse ATOM z-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Charge: digit at col 78, optional '-' sign at col 79 (e.g. "2-").
      memcpy(tmp,buf+78,2);
      tmp[2] = '\0';
      if( tmp[0] != ' ' ) {
        if( sscanf(tmp,"%d",&(state->atoms[state->natoms-1].charge)) != 1 ) {
          Error("read_pdb(): Parse ATOM charge failed (\"%s\"). (l %d)\n",tmp,l);
        }
      } else {
        state->atoms[state->natoms-1].charge = 0;
      }
      if( tmp[1] == '-' ) {
        state->atoms[state->natoms-1].charge *= -1;
      }
    } else if( strncmp(buf,"HETATM",strlen("HETATM")) == 0 ) {
      //
      // Parse "HETATOM" lines (ligand).
      //
      // HETATM10211  C2A FA9 A   1      19.653  34.274  30.097  0.50 0.455           C
      if( strlen(buf) != 80 ) {
        Error("read_pdb(): HETATM line not 80 cols (%d) (l %d).\n",strlen(buf),l);
      }
      // Grow the ligand atom array by one and zero the new slot.
      state->nligand++;
      if( !(state->ligand=realloc(state->ligand,state->nligand*sizeof(atom_t))) ) {
        Error("read_pdb(): Grow of ligand array failed.\n");
      }
      memset(&(state->ligand[state->nligand-1]),0,sizeof(atom_t));
      // ID.
      memcpy(tmp,buf+6,5);
      tmp[5] = '\0';
      if( sscanf(tmp,"%d",&(state->ligand[state->nligand-1].id)) != 1 ) {
        Error("read_pdb(): Parse HETATM ID failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Atom type; two-letter zinc ("ZN") is collapsed to 'Z'.
      memcpy(tmp,buf+76,2);
      tmp[2] = '\0';
      state->ligand[state->nligand-1].type = tmp[1];
      if( tmp[0] == 'Z' && tmp[1] == 'N' ) {
        state->ligand[state->nligand-1].type = 'Z';
      }
      // X coord.
      memcpy(tmp,buf+30,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->ligand[state->nligand-1].pos.s.x)) != 1 ) {
        Error("read_pdb(): Parse HETATM x-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Y coord.
      memcpy(tmp,buf+38,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->ligand[state->nligand-1].pos.s.y)) != 1 ) {
        Error("read_pdb(): Parse HETATM y-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Z coord.
      memcpy(tmp,buf+46,8);
      tmp[8] = '\0';
      if( sscanf(tmp,"%lf",&(state->ligand[state->nligand-1].pos.s.z)) != 1 ) {
        Error("read_pdb(): Parse HETATM z-pos failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Charge.
      memcpy(tmp,buf+78,2);
      tmp[2] = '\0';
      if( tmp[0] != ' ' ) {
        if( sscanf(tmp,"%d",&(state->ligand[state->nligand-1].charge)) != 1 ) {
          Error("read_pdb(): Parse HETATM charge failed (\"%s\"). (l %d)\n",tmp,l);
        }
      } else {
        state->ligand[state->nligand-1].charge = 0;
      }
      if( tmp[1] == '-' ) {
        state->ligand[state->nligand-1].charge *= -1;
      }
    } else if( strncmp(buf,"CONECT",strlen("CONECT")) == 0 ) {
      //
      // Parse "CONNECT" lines (ligand).
      //
      // CONECT1015610153101531015510157
      // Trim trailing whitespace, then require 5-char id fields.
      for(i=strlen(buf)-1; i >= 0; i--) {
        if( !isspace(buf[i]) ) {
          buf[i+1] = '\0';
          break;
        }
      }
      if( strlen(buf) < 16 ) {
        Error("read_pdb(): CONNECT line < 16 cols (%d) (l %d).\n",strlen(buf),l);
      }
      if( (strlen(buf)-strlen("CONECT"))%5 != 0 ) {
        Error("read_pdb(): CONNECT line has unexpected cols (%d) (l %d).\n",strlen(buf),l);
      }
      // Start.
      memcpy(tmp,buf+6,5);
      tmp[5] = '\0';
      if( sscanf(tmp,"%d",&a) != 1 ) {
        Error("read_pdb(): Parse CONECT start atom failed (\"%s\"). (l %d)\n",tmp,l);
      }
      // Read connected.  Field 0 is the start atom re-parsed; subsequent
      // fields are its bond partners.
      for(i=0; i < (strlen(buf)-strlen("CONECT"))/5; i++) {
        // Parse connected atom id.
        memcpy(tmp,buf+strlen("CONECT")+i*5,5);
        tmp[5] = '\0';
        if( sscanf(tmp,"%d",&t) != 1 ) {
          Error("read_pdb(): Parse CONECT line failed on id \"%s\". (l %d)\n",tmp,l);
        }
        if( i == 0 ) {
          // Find first ligand atom in bond (map PDB id -> ligand index).
          a = -1;
          for(j=0; j<state->nligand; j++) {
            if( state->ligand[j].id == t ) {
              a = j;
            }
          }
          if( a == -1 ) {
            // Bond connects to protein, discard it for now.
            // Well, presumably..  It doesn't connect to the ligand anyway.
            i = 2;
            break;
          }
        } else {
          // Connect to later atoms.
          // Convert IDs into indices.
          int b = -1;
          for(j=0; j<state->nligand; j++) {
            if( state->ligand[j].id == t ) {
              b = j;
            }
          }
          if( b == -1 ) {
            Warn("  read_pdb(): Could not find ligand atom with id %d.\n",t);
            continue;
          }
          // Do we already have this bond (i.e., upgrade single to double, etc.)?
          for(j=0; j<state->nbonds; j++) {
            if( ((state->bonds[j][0] == a) && (state->bonds[j][1] == b)) ||
                ((state->bonds[j][0] == b) && (state->bonds[j][1] == a)) ) {
              if( state->bonds[j][0] == a ) {
                // Only increment bonds starting from a.
                state->bonds[j][2]++;
                state->ligand[a].bonds++;
                state->ligand[b].bonds++;
              }
              break;
            }
          }
          if( j >= state->nbonds ) {
            // Save the new bond's ligand atom indices.
            state->nbonds++;
            if( !(state->bonds=realloc(state->bonds,state->nbonds*4*sizeof(int))) ) {
              Error("read_pdb(): Grow of bond array failed.\n");
            }
            state->bonds[state->nbonds-1][0] = a; // Atom_a.
            state->bonds[state->nbonds-1][1] = b; // Atom_b.
            state->bonds[state->nbonds-1][2] = 1; // Bond type.
            state->bonds[state->nbonds-1][3] = 0; // Aromatic?
            state->ligand[a].bonds++;
            state->ligand[b].bonds++;
          }
        }
      }
    }
  }

  // Finish up with the file.
  fclose(f);

  // Do some post-processing / cleanup on the data.
  clean_protein(state);
}

////////////////////////////////////////////////////////////

// Read ligand number state->ligndx from a multi-molecule SDF (V2000) file
// into state->ligand / state->bonds.  Returns the total number of ligands
// in the file; exits via Error() on malformed input.
int read_sdf(state_t *state, char *sdf)
{
  char buf[1024],atype[3];
  FILE *f;
  int i,l,a,b,t,natoms,nbonds,nligs;

  // Open input SDF file.
  if( !(f=fopen(sdf,"r")) ) {
    Error("read_sdf(): Failed to open input SDF file.\n");
  }

  // Save a clean version of file name (strip directory prefix and extension).
  state->pfn = strdup(sdf);
  for(i=strlen(state->pfn)-1; (i >= 0) && (state->pfn[i] != '/'); i--);
  if( state->pfn[i] == '/' ) {
    i++;
  }
  state->pfn = &(state->pfn[i]);
  for(i=strlen(state->pfn)-1; (i >= 0) && (state->pfn[i] != '.'); i--);
  if( state->pfn[i] == '.' ) {
    state->pfn[i] = '\0';
  }

  // Count number of ligs in this SDF ("$$$$" is the record terminator).
  for(l=0; fgets(buf,sizeof(buf),f) != NULL; ) {
    if( strncmp(buf,"$$$$",strlen("$$$$")) == 0 ) {
      l++;
    }
  }
  nligs = l;
  if( nligs <= state->ligndx ) {
    Error("read_sdf(): Ligand with index %d not found (out of %d).\n",state->ligndx,l);
  }
  rewind(f);

  // Skip past as many entries as needed to reach the requested one.
  for(l=0; (l < state->ligndx) && (fgets(buf,sizeof(buf),f) != NULL); ) {
    if( strncmp(buf,"$$$$",strlen("$$$$")) == 0 ) {
      l++;
    }
  }

  // Look for ligand name line.
  if( !fgets(buf,sizeof(buf),f) ) {
    Error("read_sdf(): Read header \"4k\" failed.\n");
  }
  // Skip the two comment lines.
  for(l=0; (l < 2) && (fgets(buf,sizeof(buf),f) != NULL); l++);
  if( l != 2 ) {
    Error("read_sdf(): Read header commend lines failed.\n");
  }
  // Finally read the header line we care about.
  //
  //  70 72  0  0  0  0  0  0  0  0999 V2000
  if( !fgets(buf,sizeof(buf),f) ) {
    Error("read_sdf(): Read header failed.\n");
  }
  if( (sscanf(buf,"%d %d",&natoms,&nbonds) != 2) ) {
    Error("read_sdf(): Parse header (natoms,nbonds) failed.\n");
  }

  // Read atom lines.
  state->nligand = natoms;
  if( !(state->ligand=malloc(state->nligand*sizeof(atom_t))) ) {
    Error("read_sdf(): Grow of ligand array (%d) failed.\n",state->nligand);
  }
  memset(state->ligand,0,state->nligand*sizeof(atom_t));
  for(l=0; (l < state->nligand) && (fgets(buf,sizeof(buf),f) != NULL); l++) {
    //
    // Parse atom lines (ligand).
    //
    //    -1.3810    1.7640    1.8510 C   0  0  0  0  0  0  0  0  0  0  0
    atype[0] = atype[1] = atype[2] = '\0';
    if( sscanf(buf,"%lf %lf %lf %c%c",
               &(state->ligand[l].pos.s.x),
               &(state->ligand[l].pos.s.y),
               &(state->ligand[l].pos.s.z),
               &(atype[0]),&(atype[1])   ) != 5 ) {
      Error("read_sdf(): Parse atom line failed.\n");
    }
    // Fill in id.
    state->ligand[l].id = l;
    // Convert atom symbol to single char type; try the second char first
    // (two-letter symbols), then fall back to the first.
    state->ligand[l].type = toupper(atype[1]);
    if( atom_charge(state->ligand[l].type) == -1 ) {
      state->ligand[l].type = toupper(atype[0]);
      if( atom_charge(state->ligand[l].type) == -1 ) {
        Error("read_sdf(): Unexpected atom type '%c'.\n",state->ligand[l].type);
      }
    }
  }

  // Read bond lines.
  state->nbonds = nbonds;
  if( !(state->bonds=malloc(state->nbonds*4*sizeof(int))) ) {
    Error("read_sdf(): Grow of bond array (%d) failed.\n",state->nbonds);
  }
  memset(state->bonds,0,state->nbonds*4*sizeof(int));
  for(l=0; (l < state->nbonds) && (fgets(buf,sizeof(buf),f) != NULL); l++) {
    //
    // Parse bond lines (ligand).
    //
    //  2  3  1  0  0  0  0
    //  2 13  1  0  0  0  0
    if( (sscanf(buf,"%d %d %d",&a,&b,&t)) != 3 ) {
      Error("read_sdf(): Parse atom line failed.\n");
    }
    // Save the new bond's ligand atom indices (SDF ids are 1-based).
    a--;
    b--;
    state->bonds[l][0] = a; // Atom_a.
    state->bonds[l][1] = b; // Atom_b.
    state->bonds[l][2] = t; // Bond type.
    state->bonds[l][3] = 0; // Aromatic.
    state->ligand[a].bonds += t;
    state->ligand[b].bonds += t;
  }

  // Finish up with the file.
  fclose(f);

  // Do some post-processing / cleanup on the data.
  clean_protein(state);

  // Return the number of ligands in this SDF (just read one)
  return nligs;
}

////////////////////////////////////////////////////////////

// Placeholder: voxel-grid input is not implemented yet.
void read_voxels(state_t *state, char *vox)
{
  Warn("read_voxels(): Not yet implemented.\n");
}

////////////////////////////////////////////////////////////

// Placeholder: ligand-file input is not implemented yet.
void read_ligand(state_t *state, char *lig)
{
  Warn("read_ligand(): Not yet implemented.\n");
}

////////////////////////////////////////////////////////////

// Placeholder: SMILES input is not implemented yet.
void read_smiles(state_t *state, char *smiles)
{
  Warn("read_smiles(): Not yet implemented.\n");
}

////////////////////////////////////////////////////////////
// Output
////////////////////////////////////////////////////////////

// Write the protein as a binary graph (.nhg): per-node records
// [type, active, x, y, z] as floats, then edges [dist, i, j] for every atom
// pair within rad.  The edge count at the file head is back-patched at the
// end once it is known.  A node is "active" if any ligand atom lies within rad.
void write_protein_graph(state_t *state, double rad)
{
  static int warn = 0;
  vector3_t v;
  FILE *f;
  int i,j,ti,ne,e,me,Me;
  float t;
  char fn[PATH_MAX];

  // Open output file.
  sprintf(fn,"%s_seed_%d.nhg",state->pfn,state->seed);
  if( !(f=fopen(fn,"w")) ) {
    Error("write_protein_graph(): Failed to open output file \"%s\".\n",fn);
  }

  // Write stub for n-edges, write real n-nodes.
  ne = 0;
  if( fwrite(&ne, sizeof(int), 1, f) != 1 ) {
    Error("write_protein_graph(): Failed to write n-edges.\n");
  }
  ti = state->natoms;
  if( fwrite(&ti, sizeof(int), 1, f) != 1 ) {
    Error("write_protein_graph(): Failed to write n-verticies.\n");
  }

  // Write the nodes:
  int num_atm_active=0;
  for(i=0; i<state->natoms; i++) {
    // Write type.
    t = atom_charge(state->atoms[i].type);
    if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
      Error("write_protein_graph(): Failed to write atom type.\n");
    }
    // Determine if the ith protein atom is within rad and call that active
    t = 0.0f;
    for(j=0; j<state->nligand; j++) {
      vector3_sub_vector(&(state->atoms[i].pos), &(state->ligand[j].pos), &v);
      if( vector3_length(&v) <= rad ) {
        t = 1.0;
        //num_atm_active++;
      }
    }
    // NOTE(review): this increments for EVERY atom, so the summary below
    // reports natoms, not the number of active atoms; the commented-out
    // increment above looks like the intended placement -- confirm.
    num_atm_active++;
    // Write active site state
    if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
      Error("write_protein_graph(): Failed to write atom bind.\n");
    }
    // Write position.
    t = state->atoms[i].pos.s.x;
    if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
      Error("write_protein_graph(): Failed to write atom x.\n");
    }
    t = state->atoms[i].pos.s.y;
    if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
      Error("write_protein_graph(): Failed to write atom y.\n");
    }
    t = state->atoms[i].pos.s.z;
    if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
      Error("write_protein_graph(): Failed to write atom z.\n");
    }
  }
  printf("write_protein_graph(): Applied Active Site Labels to %d out of %d protein atoms with rad=%lf... \n",num_atm_active,state->natoms, rad);

  // Write the edges.
  me = 100000;
  Me = 0;
  for(i=0; i<state->natoms; i++) {
    // For each atom, consider all other atoms in a radius,
    e = 0;
    for(j=0; j<state->natoms; j++) {
      if( i == j ) {
        continue;
      }
      vector3_sub_vector(&(state->atoms[i].pos), &(state->atoms[j].pos), &v);
      if( vector3_length(&v) <= rad ) {
        // In bounds, add edge to this neighbor to the file.
        t = vector3_length(&v);
        if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
          Error("write_protein_graph(): Failed to write edge property.\n");
        }
        t = i;
        if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
          Error("write_protein_graph(): Failed to write edge start.\n");
        }
        t = j;
        if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
          Error("write_protein_graph(): Failed to write edge end.\n");
        }
        ne++;
        e++;
      }
    }
    // Neighborhood stats for this atom
    if( e < me ) {
      me = e;
    }
    if( e > Me ) {
      Me = e;
    }
    if( !e && !warn ) {
      warn = 1;
      printf("write_protein_graph(): Found protein atom with empty neighborhood!\n");
    }
  }

  // Now that we know the number of edges, overwrite the stub.
  rewind(f);
  if( fwrite(&ne, sizeof(int), 1, f) != 1 ) {
    Error("write_protein_graph(): Failed to write n-edges.\n");
  }

  // Close file.
  fclose(f);

  // Some verbose info
  printf("  edges    = %d\n",ne);
  printf("  min nbrs = %d\n",me);
  printf("  max nbrs = %d\n",Me);
  printf("  avg nbrs = %.3f\n",((double)ne)/state->natoms);
}

////////////////////////////////////////////////////////////

// Write the voxel window (x,y,z)-(x+w,y+h,z+d) of all channels to a binary
// .vox file: header (nchannels, channel names, window center, window size)
// followed by raw float voxels.  Out-of-bounds positions are zero-padded.
void write_voxels(state_t *state, int x, int y, int z, int w, int h, int d, char *fn)
{
  FILE *f;
  int c,i,j,k,ndx;
  float t;

  // Open output file.
  if( !(f=fopen(fn,"w")) ) {
    Error("write_voxels(): Failed to open output file \"%s\".\n",fn);
  }

  // Write channel count and channel names.
  if( fwrite(&(state->chnls), sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write num channels.\n");
  }
  if( fwrite(state->chnlt, strlen(state->chnlt)*sizeof(char), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write num channels.\n");
  }

  // Write position of stencil box/win within global bounds.
  i = x + w/2;
  if( fwrite(&i, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write x.\n");
  }
  i = y + h/2;
  if( fwrite(&i, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write y.\n");
  }
  i = z + d/2;
  if( fwrite(&i, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write z.\n");
  }

  // Write the size of the box.
  if( fwrite(&w, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write width.\n");
  }
  if( fwrite(&h, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write height.\n");
  }
  if( fwrite(&d, sizeof(int), 1, f) != 1 ) {
    Error("write_voxels(): Failed to write depth.\n");
  }

  // Now write the voxels (channel-major, then z, y, x).
  for(c=0; c < state->chnls; c++) {
    for(k=z; k < z+d; k++) {
      for(j=y; j < y+h; j++) {
        for(i=x; i < x+w; i++) {
          ndx = c*(state->vz*state->vy*state->vx) + k*(state->vy*state->vx) + j*(state->vx) + i;
          if( (k < state->vz) && (j < state->vy) && (i < state->vx) &&
              (k >= 0) && (j >= 0) && (i >= 0) ) {
            // In bounds, write voxel.
            if( fwrite(&(state->voxels[ndx]), sizeof(float), 1, f) != 1 ) {
              Error("write_voxels(): Failed to write voxel (%d,%d,%d) [%d].\n", i, j, k, ndx);
            }
          } else {
            // Out of bounds, so pad.
            t = 0.0;
            if( fwrite(&t, sizeof(float), 1, f) != 1 ) {
              Error("write_voxels(): Failed to write voxel (%d,%d,%d) [%d].\n", i, j, k, ndx);
            }
          }
        }
      }
    }
  }

  // Close file.
  fclose(f);
}

////////////////////////////////////////////////////////////

// Placeholder: intentionally empty (active-site node labeling is handled in
// write_protein_graph() at present).
void label_protein_active_site_nodes(state_t *state, int w, int h, int d, int s, int t, int l)
{

}

////////////////////////////////////////////////////////////

// Slide a w*h*d stencil box over the voxel grid with stride s and write every
// box whose occupancy (sum of voxel values) meets threshold t via
// write_voxels().  When l is nonzero, only boxes that fully contain the
// ligand bounding box (state->lmin/lmax) are considered.
void write_stencil_boxes(state_t *state, int w, int h, int d, int s, int t, int l)
{
  char buf[1024];
  float pct,tot,lpct,o,nv;
  int c,i,j,k,x,y,z,ndx,nb,f;

  // Consider each possible stencil box position (count them for progress %).
  tot = 0.0;
  for(x=-(w-1); x < state->vx+(w-1); x+=s) {
    for(y=-(h-1); y < state->vy+(h-1); y+=s) {
      for(z=-(d-1); z < state->vz+(d-1); z+=s) {
        tot++;
      }
    }
  }
  //tot = (2.0*(w-1.0))/s * (2.0*(h-1.0))/s * (2.0*(d-1.0)/s);

  pct = 0.0f;
  lpct = 0.0f;
  o = 0.0f;
  nb = 0;
  for(x=-(w-1); x < state->vx+(w-1); x+=s) {
    for(y=-(h-1); y < state->vy+(h-1); y+=s) {
      for(z=-(d-1); z < state->vz+(d-1); z+=s) {
        // Would a stencil box here hold enough non-empty voxels?
        if( l ) {
          // Ligand-only mode, see if the ligand fits.
          // Voxel indices are converted back to world coords via state->res
          // and the global half-extent before comparing to lmin/lmax.
          if( ((x/((double)state->res))    -(VOXELIZER_GLOBAL_SIZE/2.0) < state->lmin.s.x) &&
              (((x+w)/((double)state->res))-(VOXELIZER_GLOBAL_SIZE/2.0) > state->lmax.s.x) &&
              ((y/((double)state->res))    -(VOXELIZER_GLOBAL_SIZE/2.0) < state->lmin.s.y) &&
              (((y+h)/((double)state->res))-(VOXELIZER_GLOBAL_SIZE/2.0) > state->lmax.s.y) &&
              ((z/((double)state->res))    -(VOXELIZER_GLOBAL_SIZE/2.0) < state->lmin.s.z) &&
              (((z+d)/((double)state->res))-(VOXELIZER_GLOBAL_SIZE/2.0) > state->lmax.s.z)    ) {
            // The ligand fits in the box starting at x,y,z with size w,h,d.
            f = 1;
          } else {
            // The ligand doesn't fit in the box.
            f = 0;
          }
        } else {
          // Not ligand-only mode, always say "fits"
          f = 1;
        }
        nv = 0;
        if( f ) {
          // Skip the no-fit case.  Occupancy sum is a parallel reduction.
          #pragma omp parallel for private(c,k,j,i,ndx) reduction(+:nv) collapse(2)
          for(c=0; c < state->chnls; c++) {
            for(k=z; k < z+d; k++) {
              for(j=y; j < y+h; j++) {
                for(i=x; i < x+w; i++) {
                  if( (k < state->vz) && (j < state->vy) && (i < state->vx) &&
                      (k >= 0) && (j >= 0) && (i >= 0) ) {
                    // Increment num-valid (occupancy) count.
                    ndx = c*(state->vz*state->vy*state->vx) + k*(state->vy*state->vx) + j*(state->vx) + i;
                    nv += state->voxels[ndx];
                  }
                }
              }
            }
          }
        }
        // Make a progress print to stdout.
        pct++;
        if( (pct-lpct)/tot > 0.1 ) {
          lpct = pct;
          printf(".");
          fflush(stdout);
        }
        if( f && (nv >= t) ) {
          // Non-empty threshold met, write the stencil box.
          nb++;
          o += nv;
          // Do the write.
          sprintf(buf,"%s_box_%d_%d_%d_seed_%d.vox",state->pfn,x,y,z,state->seed);
          write_voxels(state, x, y, z, w, h, d, buf);
        }
      }
    }
  }

  // Print out some summary info to stdout.
  printf("\n  Wrote %d stencil boxes, avg occupancy: %.1f.\n",nb,o/nb);
  state->nsb = nb;
}

////////////////////////////////////////////////////////////

// Write the ligand as text: atom count, atomic charge vector, formal charge
// vector, full bond-order matrix, and full pairwise distance matrix.
void write_ligand(state_t *state, char *fn)
{
  vector3_t v;
  FILE *f;
  char buf[128];
  int i,j,k;

  // Open output ligand file.
  if( !(f=fopen(fn,"w")) ) {
    Error("write_ligand(): Failed to open output file \"%s\".\n",fn);
  }

  // Write atom count header.
  sprintf(buf,"%d\n",state->nligand);
  if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
    Error("write_ligand(): Failed to write atom count.\n");
  }

  // Write the atomic charge vector.
  for(i=0; i<state->nligand; i++) {
    sprintf(buf,"%2d%c",atom_charge(state->ligand[i].type),
            ((i==(state->nligand-1))?('\n'):(' ')));
    if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
      Error("write_ligand(): Failed to write atom charge (%d).\n",i);
    }
  }

  // Write formal charge vector.
  for(i=0; i<state->nligand; i++) {
    if( state->ligand[i].charge < 0 ) {
      sprintf(buf,"%1d%c",state->ligand[i].charge,
              ((i==(state->nligand-1))?('\n'):(' ')));
    } else if( state->ligand[i].charge > 0 ) {
      sprintf(buf,"+%1d%c",state->ligand[i].charge,
              ((i==(state->nligand-1))?('\n'):(' ')));
    } else {
      sprintf(buf,"%2d%c",0,
              ((i==(state->nligand-1))?('\n'):(' ')));
    }
    if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
      Error("write_ligand(): Failed to write atom charge (%d).\n",i);
    }
  }

  // Write bond matrix (bond order for each (i,j) pair; 0 when no bond).
  for(i=0; i<state->nligand; i++) {
    for(j=0; j<state->nligand; j++) {
      for(k=0; k<state->nbonds; k++) {
        if( ((state->bonds[k][0] == i) && (state->bonds[k][1] == j)) ||
            ((state->bonds[k][1] == i) && (state->bonds[k][0] == j)) ) {
          // Found a bond.
          sprintf(buf,"%2d%c",state->bonds[k][2],
                  ((j==(state->nligand-1))?('\n'):(' ')));
          if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
            Error("write_ligand(): Failed to write bond number (%d,%d).\n",i,j);
          }
          break;
        }
      }
      if( k >= state->nbonds ) {
        // No bond.
        sprintf(buf,"%2d%c",0,((j==(state->nligand-1))?('\n'):(' ')));
        if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
          Error("write_ligand(): Failed to write bond number (%d,%d).\n",i,j);
        }
      }
    }
  }

  // Write distance matrix.
  for(i=0; i<state->nligand; i++) {
    for(j=0; j<state->nligand; j++) {
      vector3_sub_vector(&(state->ligand[i].pos), &(state->ligand[j].pos), &v);
      sprintf(buf,"%.3le%c",vector3_length(&v),
              ((j==(state->nligand-1))?('\n'):(' ')));
      if( fwrite(buf, strlen(buf), 1, f) != 1 ) {
        Error("write_ligand(): Failed to write bond number (%d,%d).\n",i,j);
      }
    }
  }

  // Close file.
  fclose(f);
}

////////////////////////////////////////////////////////////
second.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP demo: pin the team size at 6 threads and have every thread in the
 * parallel region report its id and the team size.
 *
 * Fix: the original declared an array A[10] and locals i, k that were never
 * used, and stored omp_get_num_procs() into m without ever reading it.
 * The dead locals and the unused query are removed; the program's output is
 * unchanged.
 *
 * Returns 0 on success.
 */
int main(){
    /* Disable dynamic adjustment so the runtime honours the requested
       team size exactly. */
    omp_set_dynamic(0);
    printf("Max No. of threads: %d\n", omp_get_max_threads());
    omp_set_num_threads(6);
    /* Each of the 6 threads executes the single printf statement once. */
    #pragma omp parallel
    printf("Hello from thread no. %d of total %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    return 0;
}
aarch64_vfabi_NarrowestDataSize.c
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // REQUIRES: aarch64-registered-target // Note: -fopemp and -fopenmp-simd behavior are expected to be the same. // This test checks the values of Narrowest Data Size (NDS), as defined in // https://github.com/ARM-software/abi-aa/tree/master/vfabia64 // // NDS is used to compute the <vlen> token in the name of AdvSIMD // vector functions when no `simdlen` is specified, with the rule: // // if NDS(f) = 1, then VLEN = 16, 8; // if NDS(f) = 2, then VLEN = 8, 4; // if NDS(f) = 4, then VLEN = 4, 2; // if NDS(f) = 8 or NDS(f) = 16, then VLEN = 2. // NDS(NDS_is_sizeof_char) = 1 #pragma omp declare simd notinbranch char NDS_is_sizeof_char(short in); // CHECK-DAG: _ZGVnN16v_NDS_is_sizeof_char // CHECK-DAG: _ZGVnN8v_NDS_is_sizeof_char // CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_char // NDS(NDS_is_sizeof_short) = 2 #pragma omp declare simd notinbranch int NDS_is_sizeof_short(short in); // CHECK-DAG: _ZGVnN8v_NDS_is_sizeof_short // CHECK-DAG: _ZGVnN4v_NDS_is_sizeof_short // CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_short // NDS(NDS_is_sizeof_float_with_linear) = 4, and not 2, because the pointers are // marked as `linear` and therefore the size of the pointee realizes // the NDS. #pragma omp declare simd linear(sin) notinbranch void NDS_is_sizeof_float_with_linear(double in, float *sin); // Neon accepts only power of 2 values as <vlen>. 
// CHECK-DAG: _ZGVnN4vl4_NDS_is_sizeof_float_with_linear // CHECK-DAG: _ZGVnN2vl4_NDS_is_sizeof_float_with_linear // CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_float_with_linear // NDS(NDS_is_size_of_float) = 4 #pragma omp declare simd notinbranch double NDS_is_size_of_float(float in); // CHECK-DAG: _ZGVnN4v_NDS_is_size_of_float // CHECK-DAG: _ZGVnN2v_NDS_is_size_of_float // CHECK-NOT: _ZGV{{.*}}_NDS_is_size_of_float // NDS(NDS_is_sizeof_double) = 8 #pragma omp declare simd linear(sin) notinbranch void NDS_is_sizeof_double(double in, double *sin); // CHECK-DAG: _ZGVnN2vl8_NDS_is_sizeof_double // CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_double // NDS(double_complex) = 16 #pragma omp declare simd notinbranch double _Complex double_complex(double _Complex); // CHECK-DAG: _ZGVnN2v_double_complex // CHECK-NOT: _ZGV{{.*}}_double_complex // NDS(double_complex_linear_char) = 1, becasue `x` is marked linear. #pragma omp declare simd linear(x) notinbranch double _Complex double_complex_linear_char(double _Complex y, char *x); // CHECK-DAG: _ZGVnN8vl_double_complex_linear_char // CHECK-DAG: _ZGVnN16vl_double_complex_linear_char // CHECK-NOT: _ZGV{{.*}}_double_complex_linear_char static float *F; static double *D; static short S; static int I; static char C; static double _Complex DC; void do_something() { C = NDS_is_sizeof_char(S); I = NDS_is_sizeof_short(S); NDS_is_sizeof_float_with_linear(*D, F); *D = NDS_is_size_of_float(*F); NDS_is_sizeof_double(*D, D); DC = double_complex(DC); DC = double_complex_linear_char(DC, &C); }
ejercicio3.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Reads an initialization value `a` exactly once (inside `single`), fills
 * b[] with it in a worksharing loop, then the master thread prints b[].
 *
 * Fix: `main` was declared with an implicit int return type, which is
 * invalid in C99 and later; it is now `int main(void)` and returns 0.
 * All user-visible output strings are unchanged.
 */
int main(void){
    int n=9, i,a,b[n];

    /* Sentinel fill: an entry the team failed to write would stay -1. */
    for(i=0;i<n;i++) b[i]=-1;

    #pragma omp parallel
    {
        /* One thread reads `a`; the implicit barrier at the end of
           `single` publishes it to the whole team before the loop below.
           NOTE(review): the scanf return value is not checked — on bad
           input `a` is read uninitialized; kept as in the original. */
        #pragma omp single
        {
            printf("Introduce valor de inicializacion a:");
            scanf("%d",&a);
            printf("Single ejecutada por el thread%d\n",omp_get_thread_num());
        }
        /* Worksharing loop: iterations are split among the team. */
        #pragma omp for
        for(i=0;i<n;i++) b[i]=a;
        /* Only the master thread prints; the implicit barrier of the
           `for` above guarantees b[] is complete here. */
        #pragma omp master
        {
            for(i=0;i<n;i++){
                printf("Master ejecutada por el thread%d\n",omp_get_thread_num());
                printf("b[%d]=%d\t",i,b[i]);
            }
        }
        printf("\n");
    }
    return 0;
}
teams-5.c
/* { dg-do run } */

#include <stdlib.h>

/* Maximum tolerated relative error between host and device results. */
#define EPS 0.00001
#define N 10000

/* Fill the two input vectors with deterministic test data. */
void init (float *a, float *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      a[i] = 0.1 * i;
      b[i] = 0.01 * i * i;
    }
}

/* Host-side reference: element-wise product p = v1 * v2. */
void vec_mult_ref (float *p, float *v1, float *v2, int n)
{
  int i;
  for (i = 0; i < n; i++)
    p[i] = v1[i] * v2[i];
}

/* Same product, offloaded: v1/v2 are mapped to the device, p mapped back,
   and the iterations are spread across the league with distribute simd. */
void vec_mult (float *p, float *v1, float *v2, int n)
{
  int i;
  #pragma omp target teams map(to: v1[0:n], v2[:n]) map(from: p[0:n])
  #pragma omp distribute simd
  for (i = 0; i < n; i++)
    p[i] = v1[i] * v2[i];
}

/* Abort if any element of b deviates from a by more than EPS (relative;
   absolute when a[i] is exactly zero). */
void check (float *a, float *b, int n)
{
  int i;
  for (i = 0 ; i < n ; i++)
    {
      float err = (a[i] == 0.0) ? b[i] : (b[i] - a[i]) / a[i];
      if (((err > 0) ? err : -err) > EPS)
        abort ();
    }
}

/* Driver: compute the product both ways and compare. */
int main ()
{
  float *p1 = (float *) malloc (N * sizeof (float));
  float *p2 = (float *) malloc (N * sizeof (float));
  float *v1 = (float *) malloc (N * sizeof (float));
  float *v2 = (float *) malloc (N * sizeof (float));
  init (v1, v2, N);
  vec_mult_ref (p1, v1, v2, N);
  vec_mult (p2, v1, v2, N);
  check (p1, p2, N);
  free (p1);
  free (p2);
  free (v1);
  free (v2);
  return 0;
}
stream_malloc.c
// Copyright 2009-2019 NTESS. Under the terms // of Contract DE-NA0003525 with NTESS, the U.S. // Government retains certain rights in this software. // // Copyright (c) 2009-2019, NTESS // All rights reserved. // // Portions are copyright of other developers: // See the file CONTRIBUTORS.TXT in the top level directory // the distribution for more information. // // This file is part of the SST software package. For license // information, see the LICENSE file in the top level directory of the // distribution. #include <stdio.h> #include <stdlib.h> #include "ariel.h" int main(int argc, char* argv[]) { const int LENGTH = 2000; printf("Allocating arrays of size %d elements.\n", LENGTH); ariel_malloc_flag(0, 1, 1); double* a = (double*) malloc(sizeof(double) * LENGTH); ariel_malloc_flag(1, 1, 1); double* b = (double*) malloc(sizeof(double) * LENGTH); ariel_malloc_flag(2, 1, 1); double* c = (double*) malloc(sizeof(double) * LENGTH); printf("Done allocating arrays.\n"); int i; for(i = 0; i < LENGTH; ++i) { a[i] = i; b[i] = LENGTH - i; c[i] = 0; } printf("Perfoming the fast_c compute loop...\n"); #pragma omp parallel for for(i = 0; i < LENGTH; ++i) { //printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i])); c[i] = 2.0 * a[i] + 1.5 * b[i]; } double sum = 0; for(i = 0; i < LENGTH; ++i) { sum += c[i]; } printf("Sum of arrays is: %f\n", sum); printf("Freeing arrays...\n"); free(a); free(b); free(c); printf("Done.\n"); }
scan.c
#include <stdio.h> #define N 100 int main(void) { int a[N], b[N]; long long w = 0; // initialization for (int k = 0; k < N; k++) a[k] = k + 1; // a[k] is included in the computation of producing results in b[k] // #pragma omp parallel for simd reduction(inscan,+: w) #pragma omp parallel for reduction(inscan, +: w) for (int k = 0; k < N; k++) { w += a[k]; #pragma omp scan inclusive(w) b[k] = w; } printf("w=%lld, b[0]=%d b[1]=%d b[2]=%d\n", w, b[0], b[1], b[2]); // 5050, 1 3 6 if( w!=5050 || b[0]!=1 || b[1]!=3 || b[2]!=6 ){ printf("Fail!\n"); return 1; } printf("Success!\n"); return 0; }
DRB064-outeronly2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Only the outmost loop can be parallelized. The inner loop has loop carried true data dependence. However, the loop is not parallelized so no race condition. */ int n=100, m=100; double b[100][100]; int init() { int i,j,k; #pragma omp parallel for private(i ,j ) for (i = 0; i < n; i++) { #pragma omp parallel for private(j ) for (j = 0; j < m; j++) { b[i][j] = i * j; } } return 0; } void foo(int n, int m) { int i,j; #pragma omp parallel for private(i ,j ) for (i=0;i<n;i++) for (j=1;j<m;j++) // Be careful about bounds of j b[i][j]=b[i][j-1]; } int print() { int i,j,k; for (i = 0; i < n; i++) { for (j = 0; j < m; j++) { printf("%lf\n", b[i][j]); } } return 0; } int main() { init(); foo(100, 100); print(); return 0; }
rose_ompfor2.c
/* loop scheduling */
/* NOTE: this file is compiler-generated output (a ROSE/XOMP lowering of an
   OpenMP source); each block below is the expansion of one `omp for` with a
   different schedule clause. The XOMP_* calls are the outlining runtime. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "libxomp.h"

int a[20];

/* Runs the same loop (a[i] = 2*i over [lower,upper) step stride) once per
   schedule kind, printing which thread executes each iteration. */
void foo(int lower,int upper,int stride)
{
  int i;
  /* ---- default schedule: the runtime picks the iteration split ---- */
  if (XOMP_single()) {
    printf("---------default schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    /* XOMP_loop_default computes this thread's [p_lower_, p_upper_] chunk. */
    XOMP_loop_default(lower,upper - 1,stride,&p_lower_,&p_upper_);
    for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
      a[p_index_] = p_index_ * 2;
      printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
    }
  }
  XOMP_barrier();
  /* ---- static schedule (no chunk size) ---- */
  if (XOMP_single()) {
    printf("---------static schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    XOMP_loop_default(lower,upper - 1,stride,&p_lower_,&p_upper_);
    for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
      a[p_index_] = p_index_ * 2;
      printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
    }
    XOMP_barrier();
  }
  /* ---- schedule(static,5): chunked start/next protocol ---- */
  if (XOMP_single()) {
    printf("---------(static,5) schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    XOMP_loop_static_init(lower,upper - 1,stride,5);
    if (XOMP_loop_static_start(lower,upper - 1,stride,5,&p_lower_,&p_upper_)) {
      do {
        for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
          a[p_index_] = p_index_ * 2;
          printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
        }
      }while (XOMP_loop_static_next(&p_lower_,&p_upper_));
    }
    XOMP_loop_end();
  }
  /* ---- schedule(dynamic,3) ---- */
  if (XOMP_single()) {
    printf("---------(dynamic,3) schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    XOMP_loop_dynamic_init(lower,upper - 1,stride,3);
    if (XOMP_loop_dynamic_start(lower,upper - 1,stride,3,&p_lower_,&p_upper_)) {
      do {
        for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
          a[p_index_] = p_index_ * 2;
          printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
        }
      }while (XOMP_loop_dynamic_next(&p_lower_,&p_upper_));
    }
    XOMP_loop_end();
  }
#if 1
  /* ---- schedule(guided) ---- */
  if (XOMP_single()) {
    printf("---------(guided) schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    XOMP_loop_guided_init(lower,upper - 1,stride,1);
    if (XOMP_loop_guided_start(lower,upper - 1,stride,1,&p_lower_,&p_upper_)) {
      do {
        for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
          a[p_index_] = p_index_ * 2;
          printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
        }
      }while (XOMP_loop_guided_next(&p_lower_,&p_upper_));
    }
    XOMP_loop_end();
  }
#endif
  /* ---- schedule(runtime) with ordered ---- */
  if (XOMP_single()) {
    printf("---------(runtime) ordered schedule--------------\n");
  }
  XOMP_barrier();
  {
    int _p_i;
    long p_index_;
    long p_lower_;
    long p_upper_;
    XOMP_loop_ordered_runtime_init(lower,upper - 1,stride);
    if (XOMP_loop_ordered_runtime_start(lower,upper - 1,stride,&p_lower_,&p_upper_)) {
      do {
        for (p_index_ = p_lower_; p_index_ <= p_upper_; p_index_ += stride) {
          a[p_index_] = p_index_ * 2;
          printf("Iteration %2d is carried out by thread %2d\n",p_index_,(omp_get_thread_num()));
        }
      }while (XOMP_loop_ordered_runtime_next(&p_lower_,&p_upper_));
    }
    XOMP_loop_end();
  }
}

static void OUT__1__9651__(void *__out_argv);

/* Entry point: starts the XOMP runtime, launches the outlined parallel
   region OUT__1__9651__, then shuts the runtime down. */
int main(int argc,char **argv)
{
  int status = 0;
  XOMP_init(argc,argv);
  //#pragma omp parallel for schedule (auto)
  XOMP_parallel_start(OUT__1__9651__,0,1,0,"/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/ompfor2.c",81);
  XOMP_parallel_end("/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/ompfor2.c",86);
  XOMP_terminate(status);
  return 0;
}

/* Outlined body of the original `omp parallel` region: one thread reports
   the team size, then every thread runs foo(0,20,3). */
static void OUT__1__9651__(void *__out_argv)
{
  if (XOMP_single()) {
    printf("Using %d threads.\n",(omp_get_num_threads()));
  }
  XOMP_barrier();
  foo(0,20,3);
}
core_slacpy_band.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlacpy_band.c, normal z -> s, Fri Sep 28 17:38:19 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_float
 *
 * plasma_core_slacpy copies a sub-block A of a band matrix stored in LAPACK's band format
 * to a corresponding sub-block B of a band matrix in PLASMA's band format.
 * Entries of B outside the copied band region are zeroed.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          Which part of the band to copy: PlasmaGeneral, PlasmaUpper
 *          or (otherwise) the lower part.
 *
 * @param[in] it
 *          The row block index of the tile.
 *
 * @param[in] jt
 *          The column block index of the tile.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B. M >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B. N >= 0.
 *
 * @param[in] nb
 *          Tile size.
 *
 * @param[in] kl
 *          Number of subdiagonals within the band.
 *
 * @param[in] ku
 *          Number of superdiagonals within the band.
 *
 * @param[in] A
 *          The M-by-N matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,M).
 *
 * @param[out] B
 *          The M-by-N copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,M).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_slacpy_lapack2tile_band(plasma_enum_t uplo,
                                         int it, int jt,
                                         int m, int n, int nb, int kl, int ku,
                                         const float *A, int lda,
                                               float *B, int ldb)
{
    int i, j;
    int j_start, j_end;

    /* Column range [j_start, j_end) that intersects the band for this tile. */
    if (uplo == PlasmaGeneral) {
        j_start = 0;
        // pivot back and could fill in
        j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        j_start = imax(0, (it-jt)*nb-kl);
        j_end = n;
    }

    /* Columns entirely before the band: zero fill. */
    for (j = 0; j < j_start; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    /* Columns intersecting the band: copy the in-band rows, zero the rest. */
    for (j = j_start; j < j_end; j++) {
        int i_start, i_end;
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use sgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end = imin(m, (jt-it)*nb+j+kl+1);
        }
        for (i = 0; i < i_start; i++) {
            B[i + j*ldb] = 0.0;
        }
        for (i = i_start; i < i_end; i++) {
            B[i + j*ldb] = A[i + j*lda];
        }
        for (i = i_end; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    /* Columns entirely after the band: zero fill. */
    for (j = j_end; j < n; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
}

/******************************************************************************/
/* OpenMP task wrapper: depends on all of A (in) and all of B (out). */
void plasma_core_omp_slacpy_lapack2tile_band(plasma_enum_t uplo,
                                             int it, int jt,
                                             int m, int n, int nb, int kl, int ku,
                                             const float *A, int lda,
                                                   float *B, int ldb)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    plasma_core_slacpy_lapack2tile_band(uplo, it, jt,
                                        m, n, nb, kl, ku,
                                        A, lda,
                                        B, ldb);
}

/***************************************************************************//**
 *
 * @ingroup core_float
 *
 * plasma_core_slacpy copies all or part of a two-dimensional matrix A to another
 * matrix B (PLASMA band tile back to LAPACK band layout; only in-band
 * entries are written).
 *
 *******************************************************************************
 *
 * @param[in] it
 *          The row block index of the tile.
 *
 * @param[in] jt
 *          The column block index of the tile.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B. n >= 0.
 *
 * @param[in] A
 *          The m-by-n matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1, m).
 *
 * @param[out] B
 *          The m-by-n copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1, m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_slacpy_tile2lapack_band(plasma_enum_t uplo,
                                         int it, int jt,
                                         int m, int n, int nb, int kl, int ku,
                                         const float *B, int ldb,
                                               float *A, int lda)
{
    int i, j;
    int j_start, j_end;

    /* Same band-intersection bounds as the lapack2tile direction. */
    if (uplo == PlasmaGeneral) {
        j_start = 0;
        // pivot back and could fill in
        j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        j_start = imax(0, (it-jt)*nb-kl);
        j_end = n;
    }

    for (j = j_start; j < j_end; j++) {
        int i_start, i_end;
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use sgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end = imin(m, (jt-it)*nb+j+kl+1);
        }
        /* Only in-band entries are written back; A outside the band is
           left untouched (unlike the zero-filling lapack2tile direction). */
        for (i = i_start; i < i_end; i++) {
            A[i + j*lda] = B[i + j*ldb];
        }
    }
}

/******************************************************************************/
/* OpenMP task wrapper: depends on all of B (in) and all of A (out). */
void plasma_core_omp_slacpy_tile2lapack_band(plasma_enum_t uplo,
                                             int it, int jt,
                                             int m, int n, int nb, int kl, int ku,
                                             const float *B, int ldb,
                                                   float *A, int lda)
{
    #pragma omp task depend(in:B[0:ldb*n]) \
                     depend(out:A[0:lda*n])
    plasma_core_slacpy_tile2lapack_band(uplo, it, jt,
                                        m, n, nb, kl, ku,
                                        B, ldb,
                                        A, lda);
}
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 14000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 100 #endif #endif #ifndef NTIMES # define NTIMES 100 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
 *
 *-----------------------------------------------------------------------*/

# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* The three benchmark arrays (static so they live in the data segment). */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
                   b[STREAM_ARRAY_SIZE+OFFSET],
                   c[STREAM_ARRAY_SIZE+OFFSET];

/* Per-kernel timing accumulators: Copy, Scale, Add, Triad. */
static double avgtime[4] = {0}, maxtime[4] = {0},
              mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel invocation (2 or 3 arrays touched). */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

int main()
    {
    int			quantum, checktick();
    int			BytesPerWord;
    int			k;
    ssize_t		j;
    STREAM_TYPE		scalar;
    double		t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    /* Legacy knob: older STREAM versions used N for the array size. */
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
    /* Report the configured team size (only the master thread prints). */
#pragma omp parallel
    {
#pragma omp master
	{
	    k = omp_get_num_threads();
	    printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* Count the threads actually launched via an atomic increment. */
    k = 0;
#pragma omp parallel
#pragma omp atomic
	    k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
	    a[j] = 1.0;
	    b[j] = 2.0;
	    c[j] = 0.0;
	}

    printf(HLINE);

    if  ( (quantum = checktick()) >= 1)
	printf("Your clock granularity/precision appears to be "
	    "%d microseconds.\n", quantum);
    else {
	printf("Your clock granularity appears to be "
	    "less than one microsecond.\n");
	quantum = 1;
    }

    /* Estimate a single test's duration in clock ticks. */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
		a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
	" of %d microseconds.\n", (int) t  );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /*	--- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
	{
	/* Copy: c = a */
	times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
#endif
	times[0][k] = mysecond() - times[0][k];

	/* Scale: b = scalar * c */
	times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
#endif
	times[1][k] = mysecond() - times[1][k];

	/* Add: c = a + b */
	times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
#endif
	times[2][k] = mysecond() - times[2][k];

	/* Triad: a = b + scalar * c */
	times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
#endif
	times[3][k] = mysecond() - times[3][k];
	}

    /*	--- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
	{
	for (j=0; j<4; j++)
	    {
	    avgtime[j] = avgtime[j] + times[j][k];
	    mintime[j] = MIN(mintime[j], times[j][k]);
	    maxtime[j] = MAX(maxtime[j], times[j][k]);
	    }
	}

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
		avgtime[j] = avgtime[j]/(double)(NTIMES-1);

		/* Bandwidth is computed from the best (minimum) time. */
		printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
	       1.0E-06 * bytes[j]/mintime[j],
	       avgtime[j],
	       mintime[j],
	       maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

# define	M	20

/* Estimate the timer granularity: collect M distinct clock readings and
   return the minimum positive difference, in microseconds. */
int
checktick()
    {
    int		i, minDelta, Delta;
    double	t1, t2, timesfound[M];

/*  Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
	t1 = mysecond();
	while( ((t2=mysecond()) - t1) < 1.0E-6 )
	    ;
	timesfound[i] = t1 = t2;
	}

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
	Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
	minDelta = MIN(minDelta, MAX(Delta,0));
	}

   return(minDelta);
    }

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems.
 */
#include <time.h>
#include <sys/time.h>

/* Return wall-clock time in seconds via gettimeofday().
   Resolution is platform dependent (typically ~1 microsecond).
   The return status of gettimeofday() is captured in i but not checked. */
double mysecond()
{
        struct timeval tp;
        struct timezone tzp;
        int i;

        i = gettimeofday(&tp,&tzp);
        return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

/* abs() that works for the floating-point STREAM_TYPE as well as ints. */
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Validate the a[], b[] and c[] arrays against values obtained by
   replaying the same NTIMES kernel sequence on scalars.  An array fails
   if its average relative error exceeds an epsilon chosen from the
   width of STREAM_TYPE (float vs. double). */
void checkSTREAMresults ()
{
	STREAM_TYPE aj,bj,cj,scalar;
	STREAM_TYPE aSumErr,bSumErr,cSumErr;
	STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
	double epsilon;
	ssize_t	j;
	int	k,ierr,err;

	/* reproduce initialization */
	aj = 1.0;
	bj = 2.0;
	cj = 0.0;
	/* a[] is modified during timing check */
	aj = 2.0E0 * aj;
	/* now execute timing loop: same Copy/Scale/Add/Triad sequence
	   as the measured loop, applied to scalars */
	scalar = 3.0;
	for (k=0; k<NTIMES; k++)
	{
		cj = aj;
		bj = scalar*cj;
		cj = aj+bj;
		aj = bj+scalar*cj;
	}

	/* accumulate deltas between observed and expected results */
	aSumErr = 0.0;
	bSumErr = 0.0;
	cSumErr = 0.0;
	for (j=0; j<STREAM_ARRAY_SIZE; j++) {
		aSumErr += abs(a[j] - aj);
		bSumErr += abs(b[j] - bj);
		cSumErr += abs(c[j] - cj);
		// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj);	// MCCALPIN
	}
	aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

	/* tolerance scales with the precision of STREAM_TYPE */
	if (sizeof(STREAM_TYPE) == 4) {
		epsilon = 1.e-6;
	}
	else if (sizeof(STREAM_TYPE) == 8) {
		epsilon = 1.e-13;
	}
	else {
		printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
		epsilon = 1.e-6;
	}

	err = 0;
	if (abs(aAvgErr/aj) > epsilon) {
		err++;
		printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(a[j]/aj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr));
				}
#endif
			}
		}
		printf(" For array a[], %d errors were found.\n",ierr);
	}
	if (abs(bAvgErr/bj) > epsilon) {
		err++;
		printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(b[j]/bj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr));
				}
#endif
			}
		}
		printf(" For array b[], %d errors were found.\n",ierr);
	}
	if (abs(cAvgErr/cj) > epsilon) {
		err++;
		printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(c[j]/cj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr));
				}
#endif
			}
		}
		printf(" For array c[], %d errors were found.\n",ierr);
	}
	if (err == 0) {
		printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
	}
#ifdef VERBOSE
	printf ("Results Validation Verbose Results: \n");
	printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
	printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
	printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels -- each mirrors the
   corresponding untuned loop in main() exactly */
void tuned_STREAM_Copy()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
		c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
		b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
		c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
		a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
GB_unop__identity_int16_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_int16_int8
// op(A') function: GB_unop_tran__identity_int16_int8

// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij

// NOTE: the macros below are consumed by the GB_unop_transpose.c template
// included in GB_unop_tran below, as well as by the apply kernel.

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each of the anz entries of Ax from int8_t to int16_t into Cx,
// in parallel.  Cx and Ax may be aliased (safe: each entry is read once,
// then written once at the same index).
GrB_Info GB_unop_apply__identity_int16_int8
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        int16_t z = (int16_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared GB_unop_transpose.c template, specialized by the
// GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int16_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
prand.c
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Source/prand: parallel random number generator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // A simple thread-safe parallel pseudo-random nuumber generator. #include "prand.h" //------------------------------------------------------------------------------ // prand macros //------------------------------------------------------------------------------ // Generate the next seed, and extract a random 15-bit value from a seed. #define PRAND_RECURENCE(seed) ((seed) * 1103515245 + 12345) #define PRAND_15_MAX 32767 #define PRAND_15(seed) (((seed)/65536) % (PRAND_15_MAX + 1)) //------------------------------------------------------------------------------ // global types and operators //------------------------------------------------------------------------------ // These can be shared by all threads in a user application, and thus are // safely declared as global objects. GrB_Type prand_type = NULL ; GrB_UnaryOp prand_next_op = NULL ; GrB_UnaryOp prand_iget_op = NULL ; GrB_UnaryOp prand_xget_op = NULL ; GrB_BinaryOp prand_dup_op = NULL ; //------------------------------------------------------------------------------ // prand_next_op: unary operator to construct the next seed //------------------------------------------------------------------------------ // z = f(x), where x is the old seed and z is the new seed. 
void prand_next_f (prand_t *z, const prand_t *x) { for (int k = 0 ; k < 5 ; k++) { z->seed [k] = PRAND_RECURENCE (x->seed [k]) ; } } //------------------------------------------------------------------------------ // prand_iget: unary operator to construct get a random integer from the seed //------------------------------------------------------------------------------ // z = f(x), where x is a random seed, and z is an unsigned 64-bit // pseudo-random number constructed from the seed. void prand_iget_f (uint64_t *z, const prand_t *x) { uint64_t i = 0 ; for (int k = 0 ; k < 5 ; k++) { i = PRAND_15_MAX * i + PRAND_15 (x->seed [k]) ; } (*z) = i ; } //------------------------------------------------------------------------------ // prand_xget: unary operator to construct get a random double from the seed //------------------------------------------------------------------------------ // z = f(x), where x is a random seed, and z is a double precision // pseudo-random number constructed from the seed, in the range 0 to 1. void prand_xget_f (double *z, prand_t *x) { uint64_t i ; prand_iget_f (&i, x) ; (*z) = ((double) i) / ((double) UINT64_MAX) ; } //------------------------------------------------------------------------------ // prand_dup: binary operator to build a vector //------------------------------------------------------------------------------ // This is required by GrB_Vector_build, but is never called since no // duplicates are created. This is the SECOND operator for the prand_type. 
#if defined ( __INTEL_COMPILER )
// disable icc warnings
// 869: unused parameters
#pragma warning (disable: 869 )
#elif defined ( __GNUC__ )
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

// dup operator: keep the later value y (x is intentionally unused; see
// the compiler-warning suppressions above)
void prand_dup_f (prand_t *z, /* unused: */ const prand_t *x, const prand_t *y)
{
    (*z) = (*y) ;
}

//------------------------------------------------------------------------------
// prand_init: create the random seed type and its operators
//------------------------------------------------------------------------------

// Free every global object; safe to call on partially-constructed state
// since GrB_*_free on a NULL handle is a no-op.
#define PRAND_FREE_ALL                          \
{                                               \
    GrB_Type_free (&prand_type) ;               \
    GrB_UnaryOp_free (&prand_next_op) ;         \
    GrB_UnaryOp_free (&prand_iget_op) ;         \
    GrB_UnaryOp_free (&prand_xget_op) ;         \
    GrB_BinaryOp_free (&prand_dup_op) ;         \
}

// OK: run a GraphBLAS method; on failure free everything, report, and
// return the error from the enclosing function.
#undef OK
#define OK(method)                                              \
{                                                               \
    GrB_Info info = method ;                                    \
    if (info != GrB_SUCCESS)                                    \
    {                                                           \
        PRAND_FREE_ALL ;                                        \
        printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ;      \
        return (info) ;                                         \
    }                                                           \
}

// Create the global prand_type and its four operators.  Must be called
// once before any other prand_* function.
GrB_Info prand_init ( )
{
    prand_type = NULL ;
    prand_next_op = NULL ;
    prand_iget_op = NULL ;
    prand_xget_op = NULL ;
    prand_dup_op = NULL ;
    OK (GrB_Type_new (&prand_type, sizeof (prand_t))) ;
    OK (GrB_UnaryOp_new (&prand_next_op, (GxB_unary_function) prand_next_f, prand_type, prand_type)) ;
    OK (GrB_UnaryOp_new (&prand_iget_op, (GxB_unary_function) prand_iget_f, GrB_UINT64, prand_type)) ;
    OK (GrB_UnaryOp_new (&prand_xget_op, (GxB_unary_function) prand_xget_f, GrB_FP64, prand_type)) ;
    OK (GrB_BinaryOp_new (&prand_dup_op, (GxB_binary_function) prand_dup_f, prand_type, prand_type, prand_type)) ;
    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// prand_finalize: free the random seed type and its operators
//------------------------------------------------------------------------------

GrB_Info prand_finalize ( )
{
    PRAND_FREE_ALL ;
    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// prand_next: get the next random numbers
//------------------------------------------------------------------------------ GrB_Info prand_next ( GrB_Vector Seed ) { return (GrB_Vector_apply (Seed, NULL, NULL, prand_next_op, Seed, NULL)) ; } //------------------------------------------------------------------------------ // prand_seed: create a vector of random seeds //------------------------------------------------------------------------------ // Returns a vector of random seed values. #define PRAND_FREE_WORK \ { \ free (I) ; \ free (X) ; \ } #undef PRAND_FREE_ALL #define PRAND_FREE_ALL \ { \ PRAND_FREE_WORK ; \ GrB_Vector_free (Seed) ; \ } GrB_Info prand_seed ( GrB_Vector *Seed, // vector of random number seeds int64_t seed, // scalar input seed GrB_Index n, // size of Seed to create int nthreads // # of threads to use (OpenMP default if <= 0) ) { GrB_Index *I = NULL ; prand_t *X = NULL ; // allocate the Seed vector OK (GrB_Vector_new (Seed, prand_type, n)) ; // allocate the I and X arrays I = malloc ((n+1) * sizeof (GrB_Index)) ; X = malloc ((n+1) * sizeof (prand_t)) ; if (I == NULL || X == NULL) { PRAND_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // determine # of threads to use int nthreads_max = 1 ; #ifdef _OPENMP nthreads_max = omp_get_max_threads ( ) ; #endif if (nthreads <= 0 || nthreads > nthreads_max) { nthreads = nthreads_max ; } // construct the tuples for the initial seeds int64_t i, len = (int64_t) n ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (i = 0 ; i < len ; i++) { I [i] = i ; for (int k = 0 ; k < 5 ; k++) { X [i].seed [k] = (100000000*(seed) + 10*i + k + 1) ; } } // build the Seed vector OK (GrB_Vector_build_UDT (*Seed, I, X, n, prand_dup_op)) ; // free workspace PRAND_FREE_WORK ; // advance to the first set of random numbers OK (prand_next (*Seed)) ; return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_print: print the Seed vector 
//------------------------------------------------------------------------------ // This is meant for testing, not production use. #undef PRAND_FREE_ALL #define PRAND_FREE_ALL ; GrB_Info prand_print ( GrB_Vector Seed, int pr // 0: print nothing, 1: print some, 2: print all ) { if (pr > 0) { GrB_Index n ; OK (GrB_Vector_nvals (&n, Seed)) ; printf ("\nSeed: length %g\n", (double) n) ; prand_t x ; for (int k = 0 ; k < 5 ; k++) x.seed [k] = -1 ; for (int64_t i = 0 ; i < (int64_t) n ; i++) { if (GrB_Vector_extractElement_UDT (&x, Seed, i) == GrB_SUCCESS) { printf ("%g: ", (double) i) ; for (int k = 0 ; k < 5 ; k++) { printf (" %.18g", (double) (x.seed [k])) ; } printf ("\n") ; } if (pr == 1 && i > 10) break ; } } return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_iget: return a vector of random uint64 integers //------------------------------------------------------------------------------ GrB_Info prand_iget ( GrB_Vector X, GrB_Vector Seed ) { OK (GrB_Vector_apply (X, NULL, NULL, prand_iget_op, Seed, NULL)) ; return (prand_next (Seed)) ; } //------------------------------------------------------------------------------ // prand_xget: return a vector of random doubles, in range 0 to 1 inclusive //------------------------------------------------------------------------------ GrB_Info prand_xget ( GrB_Vector X, GrB_Vector Seed ) { OK (GrB_Vector_apply (X, NULL, NULL, prand_xget_op, Seed, NULL)) ; return (prand_next (Seed)) ; }
pi.c
/*
This program will numerically compute the integral of

                  4/(1+x*x)

from 0 to 1.  The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.

It uses the timer from the OpenMP runtime library.

History: Written by Tim Mattson, 11/99.

$gcc -fopenmp
*/
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000000;
double step;

int main ()
{
    /* BUG FIX: sum was declared `int`, which truncated the accumulated
       double partial sums; it must be a double. */
    double sum = 0.0;
    double pi;
    double start_time, run_time;

    step = 1.0/(double) num_steps;

    start_time = omp_get_wtime();

#pragma omp parallel num_threads(8)
    {
        int id = omp_get_thread_num();
        int numThreads = omp_get_num_threads();
        /* Private per-thread partial sum: avoids false sharing and a
           race on the shared accumulator. */
        double sumHilo = 0.0;

        /* Cyclic (round-robin) distribution of the intervals.
           BUG FIXES relative to the original:
           - x was a shared variable written by every thread (data race);
             it is now declared inside the loop, making it private.
           - the loop ran i = id .. <= num_steps with x = (i-0.5)*step,
             which for i == 0 evaluated the integrand at -step/2 and also
             added one extra point; the 0-based midpoint rule is
             x = (i+0.5)*step with i < num_steps. */
        for (long i = id; i < num_steps; i += numThreads) {
            double x = (i + 0.5)*step;
            sumHilo = sumHilo + 4.0/ (1.0+x*x);
        }

        /* One atomic update per thread (not per iteration). */
#pragma omp atomic
        sum += sumHilo;

#pragma omp single  /* executed exactly once, by a single thread */
        printf("Número de hilos %d\n", numThreads );
    }

    pi = sum * step;
    run_time = omp_get_wtime() - start_time;
    printf("\n pi with %ld steps is %lf in %lf seconds\n ",num_steps,pi,run_time);
    return 0;
}
graph_generator.h
/* Copyright (C) 2009-2010 The Trustees of Indiana University.             */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

#ifndef GRAPH_GENERATOR_H
#define GRAPH_GENERATOR_H

#include "user_settings.h"
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include<sys/time.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>

#ifdef GRAPH_GENERATOR_OMP
#include <omp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE(review): this is a tentative *definition* in a header; every
   translation unit that includes this header defines the symbol, which
   can cause multiple-definition link errors -- presumably it should be
   `extern` here with the definition in one .c file; confirm against the
   rest of the project. */
unsigned long *remap_vector;

#ifdef GENERATOR_USE_PACKED_EDGE_TYPE

/* 12-byte packed edge: the two 48-bit vertex ids are split into 32-bit
   low halves plus 16-bit high halves packed into `high`. */
typedef struct packed_edge {
  uint32_t v0_low;
  uint32_t v1_low;
  uint32_t high; /* v1 in high half, v0 in low half */
} packed_edge;

/* Reassemble v0: sign-extend the 16-bit high half (int16_t cast) and
   splice in the 32 low bits. */
static inline int64_t get_v0_from_edge(const packed_edge* p) {
  return (p->v0_low | ((int64_t)((int16_t)(p->high & 0xFFFF)) << 32));
}

static inline int64_t get_v1_from_edge(const packed_edge* p) {
  return (p->v1_low | ((int64_t)((int16_t)(p->high >> 16)) << 32));
}

static inline void write_edge(packed_edge* p, int64_t v0, int64_t v1) {
  p->v0_low = (uint32_t)v0;
  p->v1_low = (uint32_t)v1;
  p->high = ((v0 >> 32) & 0xFFFF) | (((v1 >> 32) & 0xFFFF) << 16);
}

#else

/* Plain 16-byte edge. */
typedef struct packed_edge {
  int64_t v0;
  int64_t v1;
} packed_edge;

static inline int64_t get_v0_from_edge(const packed_edge* p) {
  return p->v0;
}

static inline int64_t get_v1_from_edge(const packed_edge* p) {
  return p->v1;
}

/* Buffered write_edge variants below accumulate formatted "v0, v1"
   lines in a static buffer and flush MAX_LINES at a time.  WIDTH is the
   fixed byte stride reserved per formatted line. */
#define MAX_LINES 131072
#define WIDTH 25

#ifdef _OPENMP
/* NOTE(review): this variant does NOT store into *p; instead it formats
   the edge into a shared text buffer (one WIDTH-slot per thread per
   row) and, when the threadprivate countdown `buf` hits 0, each thread
   flushes its share of the buffer to a `psql ... COPY edge` pipe.
   Heavily stateful debugging/export hack -- the barriers assume all
   threads call write_edge in lockstep; confirm before reuse. */
static inline void write_edge(packed_edge* p, int64_t v0, int64_t v1) {
  static long buf;              /* per-thread countdown of free slots */
#pragma omp threadprivate(buf)
  static char* buffile = NULL;  /* shared text buffer, lazily allocated */
  if (buf==0) {
#pragma omp barrier
    /* PARALLEL */
    /* flush this thread's interleaved slice of the buffer to the DB */
    if (buffile) {
      FILE* f = popen("psql graphssd -c \"COPY edge FROM STDIN DELIMITERS ',';\"","w");
      for (int i = 1 + omp_get_thread_num(); i < MAX_LINES * omp_get_num_threads(); i+= omp_get_num_threads())
        fprintf(f, "%s", buffile + (i*WIDTH));
      fclose(f);
    } else
#pragma omp master
    {
      buffile = (char*) malloc(sizeof(char)*WIDTH*MAX_LINES*omp_get_num_threads());
/* earlier, master-only flush variant kept for reference:
#pragma omp master
  {
    //if (buffile) fclose(buffile);
    // COPY to db
    if (buffile) {
      FILE* f = popen("psql graph -c \"COPY edge FROM STDIN DELIMITERS ',';\"","w");
      for (int i = 1; i < MAX_LINES * omp_get_num_threads(); i++)
        fprintf(f, "%s", buffile + (i*WIDTH));
      fclose(f);
    } else
      buffile = (char*) malloc(sizeof(char)*WIDTH*MAX_LINES*omp_get_num_threads());
*/
    }
#pragma omp barrier
    buf = MAX_LINES-1;
  }
  sprintf(buffile+(buf*WIDTH*omp_get_num_threads())+(omp_get_thread_num()*WIDTH), "%ld, %ld\n", v0, v1);
#pragma omp atomic
  buf--;
  /* original edge store, disabled by the buffering hack:
  p->v0 = v0;
  p->v1 = v1; */
}
#else
/* Sequential variant: buffers MAX_LINES formatted edges, then dumps
   them to stdout.  Also does not store into *p. */
static inline void write_edge(packed_edge* p, int64_t v0, int64_t v1) {
  static long buf = MAX_LINES;
  static char* buffile = NULL;
  if (buf==MAX_LINES) {
    if (buffile) {
      /* FILE* f = popen("psql graphssd -c \"COPY edge FROM STDIN DELIMITERS ',';\"","w"); */
      FILE* f = stdout;
      for (int i = 0; i < MAX_LINES; i++) {
        fprintf(f, "%s", buffile + (i*WIDTH));
      }
      /* fclose(f); */
    } else
      buffile = (char*) malloc(sizeof(char)*WIDTH*MAX_LINES);
    buf = 0;
  }
  /* a line longer than WIDTH bytes has already overrun the next slot */
  if (sprintf(buffile+(buf*WIDTH), "%ld \t%ld\n", v0, v1) > WIDTH) printf("aaaah\n");
  buf++;
}
#endif

#endif

/* Generate a range of edges (from start_edge to end_edge of the total graph),
 * writing into elements [0, end_edge - start_edge) of the edges array.  This
 * code is parallel on OpenMP and XMT; it must be used with
 * separately-implemented SPMD parallelism for MPI. */
void generate_kronecker_range(
  const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1) */,
  int logN /* In base 2 */,
  int64_t start_edge, int64_t end_edge /* Indices (in [0, M)) for the edges to generate */,
  packed_edge* edges /* Size >= end_edge - start_edge */
);

#ifdef __cplusplus
}
#endif

#endif /* GRAPH_GENERATOR_H */
syr2k.dstblock-tile.c
/**
 * This version is stamped on May 10, 2016
 *
 * Contact:
 *   Louis-Noel Pouchet <pouchet.ohio-state.edu>
 *   Tomofumi Yuki <tomofumi.yuki.fr>
 *
 * Web address: http://polybench.sourceforge.net
 */
/* syr2k.c: this file is part of PolyBench/C */

#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
#include "syr2k.h"

/* Array initialization: deterministic pseudo-data so runs are
   reproducible.  alpha/beta are the BLAS scalars; A and B are n x m,
   C is n x n. */
static void init_array(int n, int m,
    DATA_TYPE *alpha,
    DATA_TYPE *beta,
    DATA_TYPE POLYBENCH_2D(C,N,N,n,n),
    DATA_TYPE POLYBENCH_2D(A,N,M,n,m),
    DATA_TYPE POLYBENCH_2D(B,N,M,n,m))
{
  int i, j;

  *alpha = 1.5;
  *beta = 1.2;
  for (i = 0; i < n; i++)
    for (j = 0; j < m; j++) {
      A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n;
      B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m;
    }
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m;
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int n,
    DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;

  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("C");
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      /* newline every 20 values to keep the dump readable */
      if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
      fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]);
    }
  POLYBENCH_DUMP_END("C");
  POLYBENCH_DUMP_FINISH;
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
 */
/* Tiled syr2k: C := alpha*A*B' + alpha*B*A' + beta*C (lower triangle),
   using explicitly-transposed copies At/Bt so the inner product walks
   both operands row-wise. */
static void kernel_syr2k(int n, int m,
    DATA_TYPE alpha,
    DATA_TYPE beta,
    DATA_TYPE POLYBENCH_2D(C,N,N,n,n),
    DATA_TYPE POLYBENCH_2D(A,N,M,n,m),
    DATA_TYPE POLYBENCH_2D(B,N,M,n,m),
    DATA_TYPE POLYBENCH_2D(At,M,N,m,n),
    DATA_TYPE POLYBENCH_2D(Bt,M,N,m,n))
{
  /* Sizes
   * the linesize is 64 bytes on keller and blum
   * on keller, L1,L2,L3 is 32 KB, 256 KB, 20480 KB
   * on blum, 32KB , 256 KB, 6 MB
   * 64 bits per word; each word is 8 bytes
   */

  // Indices
  int i, j, k;
  int ii, jj;

  // PARAMETER 1
  // make sure you change the data size when you change this too!!!
  int cacheSize = 256; // IN kilobytes !!!

  // PARAMETER 2
  int jumpA = floor((cacheSize * 1024) / (4*8*8));
  //int jumpB = floor((cacheSize * 1024 ) / (18*8) );
  int jumpB = 64;
  int jump = jumpA;

  // Misc. Calculations
  int linesize = 8; // how many bytes per cache line?
  // NOTE(review): blockcount is computed but never used below.
  int blockcount = cacheSize * 1024 / linesize; // kb * (bytes / per kb) / (bytes / per cache line)

  //BLAS PARAMS
  //UPLO  = 'L'
  //TRANS = 'N'
  //A is NxM
  //At is MxN
  //B is NxM
  //Bt is MxN
  //C is NxN
#pragma scop
  // Note: I can't figure out how to
  // stack allocate the array; I do this beforehand
  // and it's untimed.

  /* Tiled transpose of A and B into At and Bt.
     NOTE(review): the inner `#pragma omp parallel for` is *nested*
     inside the outer parallel loop; unless nested parallelism is
     enabled it runs with one thread, and if enabled it oversubscribes
     -- presumably only the outer pragma was intended; confirm. */
#pragma omp parallel for private(ii,jj,i,j)
  for (ii=0; ii < _PB_N; ii += jump){
#pragma omp parallel for private(jj,i,j)
    for(jj=0; jj < _PB_M; jj += jump){
      for (i=ii; i < fmin(jump + ii, _PB_N); i++){
        for (j=jj; j < fmin(jump + jj, _PB_M); j++){
          // Transpose
          At[j][i] = A[i][j];
          Bt[j][i] = B[i][j];
        }
      }
    }
  }

  // At is M by N
  // Bt is M by N

  /* Scale the lower triangle of C by beta, in row blocks. */
#pragma omp parallel for private(ii,i,j)
  for (ii=0; ii < _PB_N; ii += jumpB){
    for (i = ii; i <fmin(_PB_N, ii+jumpB); i++) {
      for (j = 0; j <= i; j++){
        C[i][j] *= beta;
      }
    }
  }

  /* Accumulate alpha*(A*B' + B*A') into the lower triangle, tiled over
     rows (ii) and columns (jj).  Same NOTE(review) as above: the inner
     parallel-for is nested inside an already-parallel loop. */
#pragma omp parallel for private(ii,i,j,jj,k)
  for (ii=0; ii < _PB_N; ii += jumpB){
    for (i = ii; i <fmin(_PB_N, ii+jumpB); i++) {
      for (k = 0; k < _PB_M; k++)
#pragma omp parallel for private(jj,j)
        for (jj=0; jj <= i; jj += jumpB){
          for (j = jj; j <= fmin(i,jj+jumpB); j++){
            C[i][j] += At[k][j]*alpha*B[i][k] + Bt[k][j]*alpha*A[i][k];
          }
        }
    }
  }
#pragma endscop
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;
  double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code
  double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code
#ifdef POLYBENCH_GFLOPS
  polybench_set_program_flops(FP_ops); // HAVERFORD addition
#endif
#if defined POLYFORD_VERBOSE
  printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n", __FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0);
#endif

  /* Variable declaration/allocation. */
  DATA_TYPE alpha;
  DATA_TYPE beta;
  POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);
  POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m);
  POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m);
  POLYBENCH_2D_ARRAY_DECL(At,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(Bt,DATA_TYPE,M,N,m,n);

  /* Initialize array(s). */
  init_array (n, m, &alpha, &beta,
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_syr2k (n, m,
                alpha, beta,
                POLYBENCH_ARRAY(C),
                POLYBENCH_ARRAY(A),
                POLYBENCH_ARRAY(B),
                POLYBENCH_ARRAY(At),
                POLYBENCH_ARRAY(Bt));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));

  /* Be clean.
     NOTE(review): At and Bt are never freed here -- presumably
     POLYBENCH_FREE_ARRAY(At)/(Bt) were intended as well; confirm. */
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
macroIds.c
#include "macroIdsDef.h"
#include "macroIds.h"
#include <omp.h>

#define THREADS_1 2

/* Trivial helper, kept as-is. */
int tmp() {
  return 0;
}

/*
 * Opens two parallel regions whose thread counts are given by macros
 * (THREADS_1 defined locally; THREADS_2/THREADS_3 come from
 * macroIdsDef.h) and has every thread increment the shared counter i.
 *
 * BUG FIX: the increments of `i` were unsynchronized -- concurrent
 * unguarded writes to a shared variable are a data race (undefined
 * behavior).  Each increment is now an atomic update.  The observable
 * result is unchanged: i ends up nonzero, so main returns !(i==0).
 */
int main(int argc, char ** argv) {
  int i = 0;

#pragma omp parallel num_threads( THREADS_1 )
  {
#pragma omp atomic
    i++;
  }

#pragma omp parallel num_threads( THREADS_2 + THREADS_3 )
  {
#pragma omp atomic
    i++;
  }

  return !(i==0);
}
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. % % The format of the CannyEdgeImage method is: % % Image *CannyEdgeImage(const Image *image,const double radius, % const double sigma,const double lower_percent, % const double upper_percent,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the gaussian smoothing filter. % % o sigma: the sigma of the gaussian smoothing filter. % % o lower_percent: percentage of edge pixels in the lower threshold. % % o upper_percent: percentage of edge pixels in the upper threshold. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CannyInfo { double magnitude, intensity; int orientation; ssize_t x, y; } CannyInfo; static inline MagickBooleanType IsAuthenticPixel(const Image *image, const ssize_t x,const ssize_t y) { if ((x < 0) || (x >= (ssize_t) image->columns)) return(MagickFalse); if ((y < 0) || (y >= (ssize_t) image->rows)) return(MagickFalse); return(MagickTrue); } static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view, MatrixInfo *canny_cache,const ssize_t x,const ssize_t y, const double lower_threshold,ExceptionInfo *exception) { CannyInfo edge, pixel; MagickBooleanType status; Quantum *q; ssize_t i; q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); edge.x=x; edge.y=y; if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); for (i=1; i != 0; ) { ssize_t v; i--; status=GetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); for (v=(-1); v <= 1; v++) { ssize_t 
u; for (u=(-1); u <= 1; u++) { if ((u == 0) && (v == 0)) continue; if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse) continue; /* Not an edge if gradient value is below the lower threshold. */ q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1, exception); if (q == (Quantum *) NULL) return(MagickFalse); status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel); if (status == MagickFalse) return(MagickFalse); if ((GetPixelIntensity(edge_image,q) == 0.0) && (pixel.intensity >= lower_threshold)) { *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); edge.x+=u; edge.y+=v; status=SetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); i++; } } } } return(MagickTrue); } MagickExport Image *CannyEdgeImage(const Image *image,const double radius, const double sigma,const double lower_percent,const double upper_percent, ExceptionInfo *exception) { #define CannyEdgeImageTag "CannyEdge/Image" CacheView *edge_view; CannyInfo element; char geometry[MagickPathExtent]; double lower_threshold, max, min, upper_threshold; Image *edge_image; KernelInfo *kernel_info; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *canny_cache; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Filter out noise. 
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Smooth with two 1-D blur kernels (0 and 90 degrees), then reduce to
    grayscale: Canny operates on a single intensity channel.
  */
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) ==
      MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a (columns+1) x 2 region so each pixel's right and lower
      neighbors are available for the 2x2 gradient masks.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 horizontal (Gx) and vertical (Gy) difference kernels.
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;  /* advance one cache row */
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      /*
        Quantize the gradient direction into 4 sectors using the slope
        thresholds tan(22.5 deg) ~ 0.4142 and tan(67.5 deg) ~ 2.4142.
      */
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /*
        Compare each pixel's magnitude against its two neighbors along the
        quantized gradient direction.
      */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /*
        Suppress (zero) any pixel that is not a local maximum along its
        gradient direction.
      */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        /* min/max reduction is serialized across the parallel rows */
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;  /* clear the output; TraceEdges() re-marks edge pixels later */
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e F e a t u r e s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference variance,
%  difference entropy, information measures of correlation 1, information
%  measures of correlation 2, and maximum correlation coefficient.  You can
%  access the red channel contrast, for example, like this:
%
%      channel_features=GetImageFeatures(image,1,exception);
%      contrast=channel_features[RedPixelChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageFeatures method is:
%
%      ChannelFeatures *GetImageFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return log10(|x|), clamping |x| to Log10Epsilon so the result is finite
  for zero (or near-zero) inputs.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}

MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
  const size_t distance,ExceptionInfo *exception)
{
  /*
    Per-channel accumulator, one slot per co-occurrence direction.
  */
  typedef struct _ChannelStatistics
  {
    PixelInfo
      direction[4];  /* horizontal, vertical, left and right diagonals */
  } ChannelStatistics;

  CacheView
    *image_view;

  ChannelFeatures
    *channel_features;

  ChannelStatistics
    **cooccurrence,
    correlation,
    *density_x,
    *density_xy,
    *density_y,
    entropy_x,
    entropy_xy,
    entropy_xy1,
    entropy_xy2,
    entropy_y,
    mean,
    **Q,
    *sum,
    sum_squares,
    variance;

  PixelPacket
    gray,
    *grays;

  MagickBooleanType
    status;

  ssize_t
    i,
    r;

  size_t
    length;

  unsigned int
    number_grays;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The co-occurrence offsets reach `distance` pixels away, so the image
    must be at least (distance+1) in each dimension.
  */
  if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
    return((ChannelFeatures *) NULL);
  length=MaxPixelChannels+1UL;
  channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
    sizeof(*channel_features));
  if (channel_features == (ChannelFeatures *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_features,0,length*
    sizeof(*channel_features));
  /*
    Form grays.
*/ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) 
grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) 
NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) 
NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (r=0; r < (ssize_t) image->rows; r++) { const Quantum *magick_restrict p; ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+ 2*distance,distance+2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=distance*GetPixelChannels(image);; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. 
*/ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; 
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. */ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( 
sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* 
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. */ if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) && (fabs(density_y[x].direction[i].blue) > MagickEpsilon)) Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/ density_x[z].direction[i].blue/density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) && (fabs(density_y[x].direction[i].black) > MagickEpsilon)) 
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black* cooccurrence[y][x].direction[i].black/ density_x[z].direction[i].black/density_y[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) && (fabs(density_y[x].direction[i].alpha) > MagickEpsilon)) Q[z][y].direction[i].alpha+= cooccurrence[z][x].direction[i].alpha* cooccurrence[y][x].direction[i].alpha/ density_x[z].direction[i].alpha/ density_y[x].direction[i].alpha; } } channel_features[RedPixelChannel].contrast[i]+=z*z* pixel.direction[i].red; channel_features[GreenPixelChannel].contrast[i]+=z*z* pixel.direction[i].green; channel_features[BluePixelChannel].contrast[i]+=z*z* pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].contrast[i]+=z*z* pixel.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].contrast[i]+=z*z* pixel.direction[i].alpha; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BluePixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. The algorithm accumulates % counts for every white pixel for every possible orientation (for angles from % 0 to 179 in 1 degree increments) and distance from the center of the image to % the corner (in 1 px increments) and stores the counts in an accumulator % matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2). % Next it searches this space for peaks in counts and converts the locations % of the peaks to slope and intercept in the normal x,y input image space. Use % the slope/intercepts to find the endpoints clipped to the bounds of the % image. The lines are then drawn. The counts are a measure of the length of % the lines. % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. 
% % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/ DefaultResolution; draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) memcpy(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MagickPathExtent], path[MagickPathExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. 
*/ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? 
*/ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
*/ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsStringTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. */ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. 
This new % x,y centroid is used as the center for a new window. This process iterates % until it converges and the final mean is replaces the (original window % center) pixel value. It repeats this process for the next pixel, etc., % until it processes all pixels in the image. Results are typically better with % colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr. % % The format of the MeanShiftImage method is: % % Image *MeanShiftImage(const Image *image,const size_t width, % const size_t height,const double color_distance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find pixels in this neighborhood. % % o color_distance: the color distance. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MeanShiftImage(const Image *image,const size_t width, const size_t height,const double color_distance,ExceptionInfo *exception) { #define MaxMeanShiftIterations 100 #define MeanShiftImageTag "MeanShift/Image" CacheView *image_view, *mean_view, *pixel_view; Image *mean_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); mean_image=CloneImage(image,0,0,MagickTrue,exception); if (mean_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse) { mean_image=DestroyImage(mean_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); pixel_view=AcquireVirtualCacheView(image,exception); mean_view=AcquireAuthenticCacheView(mean_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) 
shared(status,progress) \ magick_number_threads(mean_image,mean_image,mean_image->rows,1) #endif for (y=0; y < (ssize_t) mean_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) mean_image->columns; x++) { PixelInfo mean_pixel, previous_pixel; PointInfo mean_location, previous_location; ssize_t i; GetPixelInfo(image,&mean_pixel); GetPixelInfoPixel(image,p,&mean_pixel); mean_location.x=(double) x; mean_location.y=(double) y; for (i=0; i < MaxMeanShiftIterations; i++) { double distance, gamma; PixelInfo sum_pixel; PointInfo sum_location; ssize_t count, v; sum_location.x=0.0; sum_location.y=0.0; GetPixelInfo(image,&sum_pixel); previous_location=mean_location; previous_pixel=mean_pixel; count=0; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2))) { PixelInfo pixel; status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t) MagickRound(mean_location.x+u),(ssize_t) MagickRound( mean_location.y+v),&pixel,exception); distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+ (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+ (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue); if (distance <= (color_distance*color_distance)) { sum_location.x+=mean_location.x+u; sum_location.y+=mean_location.y+v; sum_pixel.red+=pixel.red; sum_pixel.green+=pixel.green; sum_pixel.blue+=pixel.blue; sum_pixel.alpha+=pixel.alpha; count++; } } } } gamma=PerceptibleReciprocal(count); mean_location.x=gamma*sum_location.x; mean_location.y=gamma*sum_location.y; 
mean_pixel.red=gamma*sum_pixel.red; mean_pixel.green=gamma*sum_pixel.green; mean_pixel.blue=gamma*sum_pixel.blue; mean_pixel.alpha=gamma*sum_pixel.alpha; distance=(mean_location.x-previous_location.x)* (mean_location.x-previous_location.x)+ (mean_location.y-previous_location.y)* (mean_location.y-previous_location.y)+ 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)* 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+ 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)* 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+ 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)* 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue); if (distance <= 3.0) break; } SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q); SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q); SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q); SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q); p+=GetPixelChannels(image); q+=GetPixelChannels(mean_image); } if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } mean_view=DestroyCacheView(mean_view); pixel_view=DestroyCacheView(pixel_view); image_view=DestroyCacheView(image_view); return(mean_image); }
GB_unop__identity_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint64_uint16
// op(A') function:  GB_unop_tran__identity_uint64_uint16

// C type:   uint64_t
// A type:   uint16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// the type of a single entry of the input matrix A
#define GB_ATYPE \
    uint16_t

// the type of a single entry of the output matrix C
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// address a single entry of C
#define GB_CX(p) Cx [p]

// unary operator (IDENTITY: the value passes through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (widening uint16_t -> uint64_t; always value-preserving)
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting.  0 here because
// the A and C types differ (uint16_t vs uint64_t), so the memcpy fast path
// below is compiled out.
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the typecast-identity operator to every entry of A, writing into Cx.
// Ab is A->b (the bitmap) when A is stored in bitmap form, or NULL otherwise.
GrB_Info GB_unop_apply__identity_uint64_uint16
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is shared but implicitly private per-thread under "omp parallel for"
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // dead code for this type pair (macro is 0); kept by the generator
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose kernel is textually included below; it uses the
// GB_* macros defined above to specialize for this type pair.
GrB_Info GB_unop_tran__identity_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
CellwiseOperator.h
// // Cubism3D // Copyright (c) 2019 CSE-Lab, ETH Zurich, Switzerland. // Distributed under the terms of the MIT license. // // Created by Ivica Kicic (kicici@ethz.ch). // /* * This file contains helper functions for applying single-cell or * stencil-based kernels on the grid. */ #ifndef CUBISMUP3D_CELLWISE_OPERATOR_H #define CUBISMUP3D_CELLWISE_OPERATOR_H #include "Operator.h" CubismUP_3D_NAMESPACE_BEGIN /* * Struct passed to kernels in `apply_kernel` and `apply_stencil_kernel` functions. * * Ideally, we would put all interesting quantities as struct members and let * compiler optimize away unused ones. Although some basic tests show that * compiler indeed do so, it is not sure if that holds for arbitrarily large * structs. * * Thus, we put only the most necessary items in the struct and provide other * values as member functions. */ struct CellInfo { const cubism::BlockInfo &block_info; int ix, iy, iz; std::array<Real, 3> get_pos() const { return block_info.pos<Real>(ix, iy, iz); } int get_abs_ix() const { return ix + block_info.index[0] * FluidBlock::sizeX; } int get_abs_iy() const { return iy + block_info.index[1] * FluidBlock::sizeY; } int get_abs_iz() const { return iz + block_info.index[2] * FluidBlock::sizeZ; } }; /* * Apply a given single-cell kernel to each cell of the grid. 
* * Usage example: * applyKernel(sim, [](CellInfo info, FluidElement &e) { * e.u = e.tmpU; * e.v = e.tmpV; * e.w = e.tmpW; * }); */ template <typename Func> void applyKernel(SimulationData &sim, Func func) { const std::vector<cubism::BlockInfo> &vInfo = sim.vInfo(); int size = (int)vInfo.size(); #pragma omp parallel for schedule(static) for (int i = 0; i < size; ++i) { FluidBlock &b = *(FluidBlock *)vInfo[i].ptrBlock; for (int iz = 0; iz < FluidBlock::sizeZ; ++iz) for (int iy = 0; iy < FluidBlock::sizeY; ++iy) for (int ix = 0; ix < FluidBlock::sizeX; ++ix) { CellInfo info{vInfo[i], ix, iy, iz}; func(info, b(ix, iy, iz)); } } } /* * Lab wrapper that shifts from cell-based indices (given by the user) to the * block-based indices (required by the original BlockLab). */ struct StencilKernelLab { LabMPI &lab; int ix, iy, iz; FluidElement& operator()(int dx, int dy = 0, int dz = 0) const { return lab(ix + dx, iy + dy, iz + dz); } }; /* * Apply a given stencil kernel to each cell of the grid. * * Usage example: * Real factor = 0.5 / h; * applyStencilKernel( * sim, * StencilInfo{-1, 0, 0, 2, 1, 1, false, 1, FE_U}, * [factor](StencilKernelLab lab, CellInfo info, FluidElement &out) { * out.df = factor * (lab(1, 0, 0).f - lab(-1, 0, 0).f); * }); */ template <typename Func> void applyStencilKernel(SimulationData &sim, cubism::StencilInfo stencil, Func func) { // Block-based kernel. 
struct Kernel { const cubism::StencilInfo stencil; Func func; void operator()(LabMPI &lab, const cubism::BlockInfo &block_info, FluidBlock &out) const { for (int iz = 0; iz < FluidBlock::sizeZ; ++iz) for (int iy = 0; iy < FluidBlock::sizeY; ++iy) for (int ix = 0; ix < FluidBlock::sizeX; ++ix) { StencilKernelLab lab_wrapper{lab, ix, iy, iz}; CellInfo info{block_info, ix, iy, iz}; func(lab_wrapper, info, out(ix, iy, iz)); } } }; struct CellwiseOperator : Operator { Kernel kernel; CellwiseOperator(SimulationData &s, const cubism::StencilInfo &stencil, Func func) : Operator(s), kernel{stencil, func} {} void operator()(const Real /* dt */) { // For now we ignore the `dt` argument. We could e.g. pass it via the // `CellInfo` struct. In that case, we would need to rename it to // something like `Extra`. compute(kernel); } std::string getName() { return "apply_stencil_kernel::CellwiseOperator"; } }; CellwiseOperator op{sim, stencil, func}; op(0.0); // dt is unused for now. } CubismUP_3D_NAMESPACE_END #endif
GB_unop__asin_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asin_fp32_fp32)
// op(A') function:  GB (_unop_tran__asin_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = asinf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = asinf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = asinf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASIN || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = asinf (aij) to the anz entries of A, writing into Cx.
// Per the C standard, asinf yields NaN (domain error) for |aij| > 1.
// The loop is elementwise and in-place safe: Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__asin_fp32_fp32)
(
    float *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,            // number of positions to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Positions with Ab [p] == 0 hold no entry and are skipped.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the GB_unop_transpose.c template, which consumes
// the GB_ATYPE / GB_CTYPE / GB_CAST_OP macros defined above.  Do not rename
// or remove those macros.
GrB_Info GB (_unop_tran__asin_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
misc.c
/******************************************************************************* Copyright (c) 2016 Advanced Micro Devices, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*******************************************************************************/ //------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ //#define OMP_GPU_OFFLOAD void zero_vector(level_type * level, int id_a){ // zero's the entire grid INCLUDING ghost zones... double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; double * __restrict__ vector_grid = vector_base; int index_grid = id_a * num_my_boxes*box_volume; int size = level->num_my_boxes * level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(to:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ map(from:vector_grid[index_grid: index_grid + size]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; int ilo = my_blocks[block].read.i; int jlo = my_blocks[block].read.j; int klo = my_blocks[block].read.k; int ihi = my_blocks[block].dim.i + ilo; int jhi = my_blocks[block].dim.j + jlo; int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; const int dim = my_boxes[box].dim; // expand the size of the block to include the ghost zones... 
if(ilo<= 0)ilo-=ghosts; if(jlo<= 0)jlo-=ghosts; if(klo<= 0)klo-=ghosts; if(ihi>=dim)ihi+=ghosts; if(jhi>=dim)jhi+=ghosts; if(khi>=dim)khi+=ghosts; double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; grid[ijk] = 0.0; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } //------------------------------------------------------------------------------------------------------------------------------ void init_vector(level_type * level, int id_a, double scalar){ // initializes the grid to a scalar while zero'ing the ghost zones... double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; int ilo = my_blocks[block].read.i; int jlo = my_blocks[block].read.j; int klo = my_blocks[block].read.k; int ihi = my_blocks[block].dim.i + ilo; int jhi = my_blocks[block].dim.j + jlo; int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; const int dim = my_boxes[box].dim; // expand the size of the block to include the ghost zones... 
if(ilo<= 0)ilo-=ghosts; if(jlo<= 0)jlo-=ghosts; if(klo<= 0)klo-=ghosts; if(ihi>=dim)ihi+=ghosts; if(jhi>=dim)jhi+=ghosts; if(khi>=dim)khi+=ghosts; double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; int ghostZone = (i<0) || (j<0) || (k<0) || (i>=dim) || (j>=dim) || (k>=dim); grid[ijk] = ghostZone ? 0.0 : scalar; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } //------------------------------------------------------------------------------------------------------------------------------ // add vectors id_a (scaled by scale_a) and id_b (scaled by scale_b) and store the result in vector id_c // i.e. c[] = scale_a*a[] + scale_b*b[] // note, only non ghost zone values are included in this calculation void add_vectors(level_type * level, int id_c, double scale_a, int id_a, double scale_b, int id_b){ double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a, id_b, id_c, scale_a, scale_b) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; const int ilo = my_blocks[block].read.i; const int jlo = my_blocks[block].read.j; const int klo = my_blocks[block].read.k; const int ihi = 
my_blocks[block].dim.i + ilo; const int jhi = my_blocks[block].dim.j + jlo; const int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; double * __restrict__ grid_b = &vector_base[id_b*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; grid_c[ijk] = scale_a*grid_a[ijk] + scale_b*grid_b[ijk]; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } //------------------------------------------------------------------------------------------------------------------------------ // multiply each element of vector id_a by vector id_b and scale, and place the result in vector id_c // i.e. 
c[]=scale*a[]*b[] // note, only non ghost zone values are included in this calculation void mul_vectors(level_type * level, int id_c, double scale, int id_a, int id_b){ double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a, id_b, id_c, scale) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; const int ilo = my_blocks[block].read.i; const int jlo = my_blocks[block].read.j; const int klo = my_blocks[block].read.k; const int ihi = my_blocks[block].dim.i + ilo; const int jhi = my_blocks[block].dim.j + jlo; const int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; double * __restrict__ grid_b = &vector_base[id_b*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; grid_c[ijk] = scale*grid_a[ijk]*grid_b[ijk]; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } 
//------------------------------------------------------------------------------------------------------------------------------ // invert each element of vector id_a, scale by scale_a, and place the result in vector id_c // i.e. c[]=scale_a/a[] // note, only non ghost zone values are included in this calculation void invert_vector(level_type * level, int id_c, double scale_a, int id_a){ double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a, id_c, scale_a) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; const int ilo = my_blocks[block].read.i; const int jlo = my_blocks[block].read.j; const int klo = my_blocks[block].read.k; const int ihi = my_blocks[block].dim.i + ilo; const int jhi = my_blocks[block].dim.j + jlo; const int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; grid_c[ijk] = 
scale_a/grid_a[ijk]; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } //------------------------------------------------------------------------------------------------------------------------------ // scale vector id_a by scale_a and place the result in vector id_c // i.e. c[]=scale_a*a[] // note, only non ghost zone values are included in this calculation void scale_vector(level_type * level, int id_c, double scale_a, int id_a){ double _timeStart = getTime(); int block; blockCopy_type * my_blocks = level->my_blocks; box_type * my_boxes = level->my_boxes; double * vector_base = level->vectors[0]; int num_my_blocks = level->num_my_blocks; int num_my_boxes = level->num_my_boxes; int num_vectors = level->numVectors; int box_volume = level->box_volume; double * __restrict__ vector_grid_a = vector_base; double * __restrict__ vector_grid_c = vector_base; int index_grid_a = id_a * num_my_boxes*box_volume; int index_grid_c = id_c * num_my_boxes*box_volume; int size = level->num_my_boxes * level->box_volume; #pragma omp target \ map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \ map(to:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \ map(from:vector_grid_c[index_grid_c:index_grid_c + size]) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) #pragma omp teams distribute \ firstprivate(num_my_blocks, num_my_boxes, num_vectors, \ box_volume, id_a, id_c, scale_a) for(block=0;block<num_my_blocks;block++){ const int box = my_blocks[block].read.box; const int ilo = my_blocks[block].read.i; const int jlo = my_blocks[block].read.j; const int klo = my_blocks[block].read.k; const int ihi = my_blocks[block].dim.i + ilo; const int jhi = my_blocks[block].dim.j + jlo; const int khi = my_blocks[block].dim.k + klo; int i,j,k; const int jStride = my_boxes[box].jStride; const int kStride = my_boxes[box].kStride; const int ghosts = my_boxes[box].ghosts; double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + 
ghosts*(1+jStride+kStride)]; double * __restrict__ grid_c = &vector_grid_c[id_c*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)]; #pragma omp parallel for collapse(3) \ if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE) for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; grid_c[ijk] = scale_a*grid_a[ijk]; } } } } level->timers.blas1 += (double)(getTime()-_timeStart); } //------------------------------------------------------------------------------------------------------------------------------ // return the dot product of vectors id_a and id_b // note, only non ghost zone values are included in this calculation double dot(level_type * level, int id_a, int id_b){ double _timeStart = getTime(); int block; double a_dot_b_level = 0.0; // TODO: Fix this //PRAGMA_THREAD_ACROSS_BLOCKS_SUM(level,block,level->num_my_blocks,a_dot_b_level) for(block=0;block<level->num_my_blocks;block++){ const int box = level->my_blocks[block].read.box; const int ilo = level->my_blocks[block].read.i; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int ihi = level->my_blocks[block].dim.i + ilo; const int jhi = level->my_blocks[block].dim.j + jlo; const int khi = level->my_blocks[block].dim.k + klo; int i,j,k; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int ghosts = level->my_boxes[box].ghosts; double * __restrict__ grid_a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. 
[0] = first non ghost zone point double * __restrict__ grid_b = level->my_boxes[box].vectors[id_b] + ghosts*(1+jStride+kStride); double a_dot_b_block = 0.0; for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; a_dot_b_block += grid_a[ijk]*grid_b[ijk]; }}} a_dot_b_level+=a_dot_b_block; } level->timers.blas1 += (double)(getTime()-_timeStart); #ifdef USE_MPI double _timeStartAllReduce = getTime(); double send = a_dot_b_level; MPI_Allreduce(&send,&a_dot_b_level,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE); double _timeEndAllReduce = getTime(); level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce); #endif return(a_dot_b_level); } //------------------------------------------------------------------------------------------------------------------------------ // return the max (infinity) norm of the vector id_a. // note, only non ghost zone values are included in this calculation double norm(level_type * level, int id_a){ // implements the max norm double _timeStart = getTime(); int block; double max_norm = 0.0; // TODO: Fix this //PRAGMA_THREAD_ACROSS_BLOCKS_MAX(level,block,level->num_my_blocks,max_norm) for(block=0;block<level->num_my_blocks;block++){ const int box = level->my_blocks[block].read.box; const int ilo = level->my_blocks[block].read.i; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int ihi = level->my_blocks[block].dim.i + ilo; const int jhi = level->my_blocks[block].dim.j + jlo; const int khi = level->my_blocks[block].dim.k + klo; int i,j,k; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int ghosts = level->my_boxes[box].ghosts; double * __restrict__ grid = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. 
[0] = first non ghost zone point double block_norm = 0.0; for(k=klo;k<khi;k++){ for(j=jlo;j<jhi;j++){ for(i=ilo;i<ihi;i++){ int ijk = i + j*jStride + k*kStride; double fabs_grid_ijk = fabs(grid[ijk]); if(fabs_grid_ijk>block_norm){block_norm=fabs_grid_ijk;} // max norm }}} if(block_norm>max_norm){max_norm = block_norm;} } // block list level->timers.blas1 += (double)(getTime()-_timeStart); #ifdef USE_MPI double _timeStartAllReduce = getTime(); double send = max_norm; MPI_Allreduce(&send,&max_norm,1,MPI_DOUBLE,MPI_MAX,level->MPI_COMM_ALLREDUCE); double _timeEndAllReduce = getTime(); level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce); #endif return(max_norm); } //------------------------------------------------------------------------------------------------------------------------------ // return the mean (arithmetic average value) of vector id_a // essentially, this is a l1 norm by a scaling by the inverse of the total (global) number of cells // note, only non ghost zone values are included in this calculation double mean(level_type * level, int id_a){ double _timeStart = getTime(); int block; double sum_level = 0.0; // TODO: Add target pragma once reduction feature is implemented PRAGMA_THREAD_ACROSS_BLOCKS_SUM(level,block,level->num_my_blocks,sum_level) for(block=0;block<level->num_my_blocks;block++){ const int box = level->my_blocks[block].read.box; const int ilo = level->my_blocks[block].read.i; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int ihi = level->my_blocks[block].dim.i + ilo; const int jhi = level->my_blocks[block].dim.j + jlo; const int khi = level->my_blocks[block].dim.k + klo; int i,j,k; int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int ghosts = level->my_boxes[box].ghosts; double * __restrict__ grid_a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. 
[0] = first non ghost zone point
  // Accumulate this block's cells into this rank's running sum.
  double sum_block = 0.0;
  for(k=klo;k<khi;k++){
  for(j=jlo;j<jhi;j++){
  for(i=ilo;i<ihi;i++){
    int ijk = i + j*jStride + k*kStride;
    sum_block += grid_a[ijk];
  }}}
  sum_level+=sum_block;
  }
  level->timers.blas1 += (double)(getTime()-_timeStart);

  // Total number of non-ghost cells on this level (as a double to avoid overflow).
  double ncells_level = (double)level->dim.i*(double)level->dim.j*(double)level->dim.k;

  #ifdef USE_MPI
  // Combine the per-rank partial sums; collective time is accounted separately
  // from the local blas1 time.
  double _timeStartAllReduce = getTime();
  double send = sum_level;
  MPI_Allreduce(&send,&sum_level,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
  double _timeEndAllReduce = getTime();
  level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce);
  #endif

  double mean_level = sum_level / ncells_level;
  return(mean_level);
}


//------------------------------------------------------------------------------------------------------------------------------
// add the scalar value shift_a to each element of vector id_a and store the result in vector id_c
// note, only non ghost zone values are included in this calculation
void shift_vector(level_type * level, int id_c, int id_a, double shift_a){
  double _timeStart = getTime();
  int block;
  // Hoist level fields into plain locals so the OpenMP map()/firstprivate
  // clauses below can name them directly.
  blockCopy_type * my_blocks = level->my_blocks;
  box_type * my_boxes = level->my_boxes;
  double * vector_base = level->vectors[0];
  int num_my_blocks = level->num_my_blocks;
  int num_my_boxes = level->num_my_boxes;
  int num_vectors = level->numVectors;
  int box_volume = level->box_volume;
  // Offload only when there is enough work and offload is enabled; the whole
  // vector storage is mapped because id_a/id_c index into the same base array.
  #pragma omp target \
  map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
  map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
  if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
  #pragma omp teams distribute \
  firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
               box_volume, id_a, id_c, shift_a)
  for(block=0;block<num_my_blocks;block++){
    const int box = my_blocks[block].read.box;
    const int ilo = my_blocks[block].read.i;
    const int jlo = my_blocks[block].read.j;
    const int klo = my_blocks[block].read.k;
    const int ihi = my_blocks[block].dim.i + ilo;
    const int jhi = my_blocks[block].dim.j + jlo;
    const int khi = my_blocks[block].dim.k + klo;
    int i,j,k;
    const int jStride = my_boxes[box].jStride;
    const int kStride = my_boxes[box].kStride;
    const int ghosts = my_boxes[box].ghosts;
    // ghosts*(1+jStride+kStride) skips the ghost halo so grid_*[0] is the
    // first non-ghost point of this box.
    double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)];
    double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)];
    #pragma omp parallel for collapse(3) \
    if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
    for(k=klo;k<khi;k++){
    for(j=jlo;j<jhi;j++){
    for(i=ilo;i<ihi;i++){
      int ijk = i + j*jStride + k*kStride;
      grid_c[ijk] = grid_a[ijk] + shift_a;
    }}}
  }
  level->timers.blas1 += (double)(getTime()-_timeStart);
}


//------------------------------------------------------------------------------------------------------------------------------
// calculate the error between two vectors (id_a and id_b) using either the max (infinity) norm or the L2 norm
// note, only non ghost zone values are included in this calculation
double error(level_type * level, int id_a, int id_b){
  double h3 = level->h * level->h * level->h;
  add_vectors(level,VECTOR_TEMP,1.0,id_a,-1.0,id_b); // VECTOR_TEMP = id_a - id_b
  // NOTE(review): the max-norm return below makes the L2 statement unreachable;
  // it appears to be deliberately kept as an alternative metric one can switch
  // to by moving the return — confirm before "cleaning up" the dead code.
  double max = norm(level,VECTOR_TEMP); return(max); // max norm of error function
  double L2 = sqrt( dot(level,VECTOR_TEMP,VECTOR_TEMP)*h3);return( L2); // normalized L2 error ?
}


//------------------------------------------------------------------------------------------------------------------------------
// Color the vector id_a with 1's and 0's
// The pattern is dictated by the number of colors in each dimension and the 'active' color (i,j,kcolor)
// note, only non ghost zone values are included in this calculation
// e.g. colors_in_each_dim=3, icolor=1, jcolor=2...
// -+---+---+---+-
//  | 0 | 1 | 0 |
// -+---+---+---+-
//  | 0 | 0 | 0 |
// -+---+---+---+-
//  | 0 | 0 | 0 |
// -+---+---+---+-
//
void color_vector(level_type * level, int id_a, int colors_in_each_dim, int icolor, int jcolor, int kcolor){
  double _timeStart = getTime();
  int block;
  blockCopy_type * my_blocks = level->my_blocks;
  box_type * my_boxes = level->my_boxes;
  double * vector_base = level->vectors[0];
  int num_my_blocks = level->num_my_blocks;
  int num_my_boxes = level->num_my_boxes;
  int num_vectors = level->numVectors;
  int box_volume = level->box_volume;
  // Unlike shift_vector above, only the slice of the base allocation that
  // holds vector id_a is mapped to the device.
  double * __restrict__ vector_grid = vector_base;
  int index_grid = id_a * num_my_boxes*box_volume;
  int size = level->num_my_boxes * level->box_volume;
  #pragma omp target \
  map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
  map(tofrom:vector_grid[index_grid: index_grid + size]) \
  if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
  #pragma omp teams distribute \
  firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
               box_volume, id_a, colors_in_each_dim, \
               icolor, jcolor, kcolor)
  for(block=0;block<num_my_blocks;block++){
    const int box = my_blocks[block].read.box;
    const int ilo = my_blocks[block].read.i;
    const int jlo = my_blocks[block].read.j;
    const int klo = my_blocks[block].read.k;
    const int ihi = my_blocks[block].dim.i + ilo;
    const int jhi = my_blocks[block].dim.j + jlo;
    const int khi = my_blocks[block].dim.k + klo;
    // low.* are the box's global coordinates; adding them keeps the coloring
    // pattern consistent across box boundaries.
    const int boxlowi = my_boxes[box].low.i;
    const int boxlowj = my_boxes[box].low.j;
    const int boxlowk = my_boxes[box].low.k;
    const int jStride = my_boxes[box].jStride;
    const int kStride = my_boxes[box].kStride;
    const int ghosts = my_boxes[box].ghosts;
    int i,j,k;
    double * __restrict__ grid = &vector_grid[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)];
    #pragma omp parallel for \
    if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
    for(k=klo;k<khi;k++){
      double sk=0.0;if( ((k+boxlowk+kcolor)%colors_in_each_dim) == 0 )sk=1.0; // if colors_in_each_dim==1 (don't color), all cells are set to 1.0
      for(j=jlo;j<jhi;j++){
        double sj=0.0;if( ((j+boxlowj+jcolor)%colors_in_each_dim) == 0 )sj=1.0;
        for(i=ilo;i<ihi;i++){
          double si=0.0;if( ((i+boxlowi+icolor)%colors_in_each_dim) == 0 )si=1.0;
          int ijk = i + j*jStride + k*kStride;
          // Cell is 1.0 only when all three dimensions hit their active color.
          grid[ijk] = si*sj*sk;
        }
      }
    }
  }
  level->timers.blas1 += (double)(getTime()-_timeStart);
}


//------------------------------------------------------------------------------------------------------------------------------
// Initialize each element of vector id_a with a "random" value.
// For simplicity, random is defined as -1.0 or +1.0 and is based on whether the coordinates of the element are even or odd
// note, only non ghost zone values are included in this calculation
void random_vector(level_type * level, int id_a){
  double _timeStart = getTime();
  int block;
  blockCopy_type * my_blocks = level->my_blocks;
  box_type * my_boxes = level->my_boxes;
  double * vector_base = level->vectors[0];
  int num_my_blocks = level->num_my_blocks;
  int num_my_boxes = level->num_my_boxes;
  int num_vectors = level->numVectors;
  int box_volume = level->box_volume;
  #pragma omp target \
  map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
  map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
  if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
  #pragma omp teams distribute \
  firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
               box_volume, id_a)
  for(block=0;block<num_my_blocks;block++){
    const int box = my_blocks[block].read.box;
    const int ilo = my_blocks[block].read.i;
    const int jlo = my_blocks[block].read.j;
    const int klo = my_blocks[block].read.k;
    const int ihi = my_blocks[block].dim.i + ilo;
    const int jhi = my_blocks[block].dim.j + jlo;
    const int khi = my_blocks[block].dim.k + klo;
    int i,j,k;
    const int jStride = my_boxes[box].jStride;
    const int kStride = my_boxes[box].kStride;
    const int ghosts = my_boxes[box].ghosts;
    double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume + box*box_volume + ghosts*(1+jStride+kStride)];
    #pragma omp parallel for collapse(3) \
    if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
    for(k=klo;k<khi;k++){
      for(j=jlo;j<jhi;j++){
        for(i=ilo;i<ihi;i++){
          int ijk = i + j*jStride + k*kStride;
          // NOTE(review): the header says values are -1.0 or +1.0, but
          // i^j^k^0x1 is not masked to one bit, so coordinates > 1 can yield
          // other values (e.g. i=2,j=1,k=0 -> -1+2*2 = 3.0). Confirm whether
          // (i^j^k^0x1)&0x1 was intended.
          grid[ijk] = -1.000 + 2.0*(i^j^k^0x1);
        }
      }
    }
  }
  level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
hwloc.c
/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://github.com/anlsys/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

#include "config.h"

#include "aml.h"

#include "aml/area/hwloc.h"
#include "aml/higher/replicaset.h"
#include "aml/higher/replicaset/hwloc.h"
#include "aml/utils/backend/hwloc.h"

/**
 * Allocate an empty hwloc replicaset: one inner-malloc chunk holding the
 * replicaset header, its hwloc-specific data, and a single pointer array
 * shared between replicas (first n_numa slots) and per-initiator pointers
 * (next n_initiator slots).
 *
 * @param out: where the allocated (not yet initialized) replicaset is stored.
 * @param initiator_type: topology object type from which replicas are read.
 * @return -AML_EDOM if the topology has no object of initiator_type.
 * @return -AML_EINVAL if the first initiator has no usable cpuset.
 * @return -AML_ENOMEM on allocation failure, AML_SUCCESS otherwise.
 */
int aml_replicaset_hwloc_alloc(struct aml_replicaset **out,
                               const hwloc_obj_type_t initiator_type)
{
	struct aml_replicaset *replicaset = NULL;
	struct aml_replicaset_hwloc_data *data = NULL;

	// Check initiator type.
	const unsigned int n_initiator =
	        hwloc_get_nbobjs_by_type(aml_topology, initiator_type);
	hwloc_obj_t initiator =
	        hwloc_get_obj_by_type(aml_topology, initiator_type, 0);

	if (n_initiator == 0)
		return -AML_EDOM;
	if (initiator == NULL || initiator->cpuset == NULL ||
	    hwloc_bitmap_weight(initiator->cpuset) <= 0)
		return -AML_EINVAL;

	const unsigned int n_numa =
	        hwloc_get_nbobjs_by_type(aml_topology, HWLOC_OBJ_NUMANODE);

	// Allocation
	replicaset = AML_INNER_MALLOC_ARRAY(n_numa + n_initiator, void *,
	                                    struct aml_replicaset,
	                                    struct aml_replicaset_hwloc_data);
	if (replicaset == NULL)
		return -AML_ENOMEM;

	// Set ops
	replicaset->ops = &aml_replicaset_hwloc_ops;

	// Set data
	replicaset->data =
	        (struct aml_replicaset_data *)AML_INNER_MALLOC_GET_FIELD(
	                replicaset, 2, struct aml_replicaset,
	                struct aml_replicaset_hwloc_data);
	data = (struct aml_replicaset_hwloc_data *)replicaset->data;

	// Record the initiator type: aml_replicaset_hwloc_local_replica()
	// reads data->type to translate the caller's location into a replica
	// index, so it must be initialized here (it previously never was).
	data->type = initiator_type;

	// Set replica pointers array
	replicaset->replica = (void **)AML_INNER_MALLOC_GET_ARRAY(
	        replicaset, void *, struct aml_replicaset,
	        struct aml_replicaset_hwloc_data);
	for (unsigned i = 0; i < n_numa; i++)
		replicaset->replica[i] = NULL;

	// Set initiator pointers array (tail of the same allocation).
	data->ptr = replicaset->replica + n_numa;

	// Set number of initiators
	data->num_ptr = n_initiator;

	// Set number of replicas to 0. Initialization will set
	// it to the correct value.
	replicaset->n = 0;

	*out = replicaset;
	return AML_SUCCESS;
}

/**
 * Create a replicaset with one replica per distinct preferred NUMA node of
 * the initiators. Initiators whose preferred node already hosts a replica
 * share that replica instead of allocating a new one.
 *
 * @param out: where the created replicaset is stored on success.
 * @param size: byte size of each replica.
 * @param initiator_type: topology object type of the initiators.
 * @param kind: hwloc distance kind used to rank NUMA nodes.
 * @return AML_SUCCESS or a negative AML error code. On failure, all
 * partially-created resources are released.
 */
int aml_replicaset_hwloc_create(struct aml_replicaset **out,
                                const size_t size,
                                const hwloc_obj_type_t initiator_type,
                                const enum hwloc_distances_kind_e kind)
{
	int err = -AML_FAILURE;
	struct aml_replicaset *replicaset = NULL;
	struct aml_replicaset_hwloc_data *data = NULL;
	struct aml_area *area = NULL;
	struct aml_area_hwloc_preferred_data *area_data = NULL;

	const unsigned int n_numa =
	        hwloc_get_nbobjs_by_type(aml_topology, HWLOC_OBJ_NUMANODE);

	// NUMA nodes already hosting a replica (parallel to replica[]).
	hwloc_obj_t targets[n_numa];

	err = aml_replicaset_hwloc_alloc(&replicaset, initiator_type);
	if (err != AML_SUCCESS)
		return err;
	replicaset->size = size;
	data = (struct aml_replicaset_hwloc_data *)replicaset->data;

	// For each initiator allocate replica on preferred area
	for (hwloc_obj_t initiator = hwloc_get_obj_by_type(
	             aml_topology, initiator_type, 0);
	     initiator != NULL; initiator = initiator->next_cousin) {

		// Get preferred area.
		err = aml_area_hwloc_preferred_create(&area, initiator, kind);
		if (err != AML_SUCCESS)
			// Jump to the replica-unmapping label: replicas mmap'd
			// in earlier iterations must not leak (the previous
			// code jumped past the munmap loop here).
			goto err_with_replicas;
		area_data = (struct aml_area_hwloc_preferred_data *)area->data;

		// Search if preferred numa node is already a target
		for (unsigned i = 0; i < replicaset->n; i++) {
			if (targets[i] == area_data->numanodes[0]) {
				data->ptr[initiator->logical_index] =
				        replicaset->replica[i];
				goto next;
			}
		}

		// Preferred numa node is not a target yet.
		void *ptr = aml_area_mmap(area, size, NULL);
		if (ptr == NULL) {
			err = -AML_ENOMEM;
			// Destroy the area before bailing out; it leaked on
			// this path before.
			aml_area_hwloc_preferred_destroy(&area);
			goto err_with_replicas;
		}
		replicaset->replica[replicaset->n] = ptr;
		data->ptr[initiator->logical_index] = ptr;
		targets[replicaset->n] = area_data->numanodes[0];
		replicaset->n++;
	next:
		// Area cleanup
		aml_area_hwloc_preferred_destroy(&area);
	}

	// Success
	*out = replicaset;
	return AML_SUCCESS;

	// Failure
err_with_replicas:
	for (unsigned i = 0; i < replicaset->n; i++)
		munmap(replicaset->replica[i], size);
	free(replicaset);
	return err;
}

/**
 * Unmap every replica and free the replicaset. Safe to call with NULL or
 * an already-destroyed (*replicaset == NULL) handle.
 */
void aml_replicaset_hwloc_destroy(struct aml_replicaset **replicaset)
{
	if (replicaset == NULL || *replicaset == NULL)
		return;
	for (unsigned int i = 0; i < (*replicaset)->n; i++)
		munmap((*replicaset)->replica[i], (*replicaset)->size);
	free(*replicaset);
	*replicaset = NULL;
}

/**
 * Fill every replica with a copy of `data` (replicaset->size bytes).
 * Copies run in parallel when OpenMP is available.
 */
int aml_replicaset_hwloc_init(struct aml_replicaset *replicaset,
                              const void *data)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (unsigned i = 0; i < replicaset->n; i++)
		memcpy(replicaset->replica[i], data, replicaset->size);
	return AML_SUCCESS;
}

/**
 * Propagate the content of replica `id` to all other replicas.
 */
int aml_replicaset_hwloc_sync(struct aml_replicaset *replicaset,
                              const unsigned int id)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (unsigned i = 0; i < replicaset->n; i++)
		if (i != id)
			memcpy(replicaset->replica[i], replicaset->replica[id],
			       replicaset->size);
	return AML_SUCCESS;
}

/**
 * Return the replica pointer closest to the calling thread's topology
 * location, or NULL if the location cannot be resolved.
 */
void *aml_replicaset_hwloc_local_replica(struct aml_replicaset *replicaset)
{
	int err;
	hwloc_obj_t initiator;
	struct aml_replicaset_hwloc_data *data = NULL;

	data = (struct aml_replicaset_hwloc_data *)replicaset->data;

	// Get object where this function is called.
	err = aml_hwloc_local_initiator(&initiator);
	if (err != AML_SUCCESS)
		return NULL;

	// If initiator is a child of the replica location, then select a
	// parent, at same depth as replica.
	while (initiator != NULL &&
	       hwloc_get_nbobjs_by_depth(aml_topology, initiator->depth) >
	               data->num_ptr)
		initiator = initiator->parent;
	if (initiator == NULL)
		return NULL;

	// If initiator is bound to a parent of several replica, then
	// chose a child replica. (NOTE(review): despite the original
	// "modulo on thread id" comment, the first child inside the cpuset
	// is always picked; `n` is only used as an emptiness check.)
	if (hwloc_get_nbobjs_by_depth(aml_topology, initiator->depth) <
	    data->num_ptr) {
		int depth = hwloc_get_type_depth(aml_topology, data->type);
		unsigned n = hwloc_get_nbobjs_inside_cpuset_by_depth(
		        aml_topology, initiator->cpuset, depth);
		if (n == 0)
			return NULL;
		hwloc_obj_t target = hwloc_get_next_obj_inside_cpuset_by_depth(
		        aml_topology, initiator->cpuset, depth, NULL);
		if (target == NULL)
			return NULL;
		initiator = target;
	}

	return data->ptr[initiator->logical_index];
}

struct aml_replicaset_ops aml_replicaset_hwloc_ops = {
        .init = aml_replicaset_hwloc_init,
        .sync = aml_replicaset_hwloc_sync,
};
citrix_ns_fmt_plug.c
/*
 * Description from Nicolas Ruff:
 * - Salt value is hashed as an hexadecimal string, not bytes.
 * - The trailing NULL byte of password string is taken into account during
 * hashing.
 * - The leading '1' is actually the string length
 * '1' = 49 = len('1') + len(hex_salt) + len(hex_sha1)
 *
 * ---------------------------------------
 * import hashlib
 *
 * def netscaler_hash( rand_bytes, pwd ):
 *     s = hashlib.sha1()
 *     s.update( rand_bytes )
 *     s.update( pwd )
 *     return "1" + rand_bytes + s.hexdigest()
 *
 * # TEST VECTOR
 * # 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f
 *
 * rand_bytes = "4dfca1e6"
 * pwd = "nsroot\x00"
 * print netscaler_hash( rand_bytes, pwd )
 * ---------------------------------------
 *
 * This software is Copyright (c) 2013 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 * This version is hard coded for salt length 8 (for speed).
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_ctrxns;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ctrxns);
#else

#include <string.h>

#ifdef _OPENMP
/* Per-thread key batch multiplier; smaller for SIMD builds since each
 * "key" there is already a whole SIMD batch. */
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#ifdef SIMD_COEF_32
/* Number of keys hashed per SIMD call. */
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "memdbg.h" // Must be last included header

#define FORMAT_LABEL "Citrix_NS10"
#define FORMAT_NAME "Netscaler 10"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
/* 55 = 64-byte SHA-1 block minus 9 bytes of mandatory padding/length, so
 * salt + password + trailing NUL always fit in a single block. */
#define PLAINTEXT_LENGTH (55 - SALT_SIZE - 1)
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* Byte offset of plaintext byte i of key `index` inside the interleaved,
 * big-endian SIMD key buffer. */
#define GETPOS(i, index) ((index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (((i)&3)^3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

static struct fmt_tests tests[] = {
	{"100000000f1dc96f425971ba590a076fd0f8bccbf25c1ba0c", ""},
	{"14623718525fe334bbd9c0704e06ce134ef17b51f6b33548c", " "},
	{"15c5c5c5c6ccd884f6383f55a6aeba5f847775e57ab012675", "Tw"},
	{"13333333319143136ba9ff9e18d1cb022b63df0926de9509e", "333"},
	{"144434241d7ce89a7484cd202400639692258dde37efc29c5", "four"},
	{"100010203e09cefed1847b7a2a5e7a5d2cdc67e8a56ed0bdd", "fiver"},
	{"14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f", "nsroot"},
	{"1deadcafe7587ea23b25a6ccf3fd53192e36ad3e9a2553b20", "magnum!"},
	{NULL}
};

#ifdef SIMD_COEF_32
/* Interleaved SHA-1 input blocks (salt is written once per batch in
 * set_salt) and packed digests, both SIMD-aligned. */
static unsigned char (*saved_key)[SHA_BUF_SIZ * 4 * NBKEYS];
static unsigned char (*crypt_key)[BINARY_SIZE * NBKEYS];
static unsigned int kpc;
#else
static char saved_salt[SALT_SIZE];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / 4];
#endif

/* Scale the key batch to the thread count and allocate key/digest buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
	kpc = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#endif
}

static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

/* Decode the trailing 40 hex chars of the ciphertext into the 20-byte
 * raw SHA-1 digest (byte-swapped for SIMD layouts). */
static void *get_binary(char *ciphertext)
{
	static unsigned char *realcipher;
	int i, len;

	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	len =
*ciphertext; /* first byte is ASCII '1' == 49 == total string length */
	ciphertext += len - 2 * BINARY_SIZE;
	for (i = 0; i < BINARY_SIZE; i++) {
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
#ifdef SIMD_COEF_32
	alter_endianity(realcipher, BINARY_SIZE);
#endif
	return (void*)realcipher;
}

/* Accept strings of exactly 49 lowercase hex chars starting with '1'
 * (the leading '1' conveniently equals the total length, 49). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int len;

	len = *ciphertext;
	if (len != (int)'1')
		return 0;
	if (strlen(ciphertext) != len)
		return 0;
	if (len != strspn(ciphertext, HEXCHARS_lc))
		return 0;
	return 1;
}

/* Store the candidate password. In the SIMD build the password is written
 * big-endian into the interleaved key buffer, starting right after the
 * 8-byte salt, followed by the 0x80 padding byte; the stored bit length
 * includes the trailing NUL (a NetScaler quirk, see file header). */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
#if ARCH_ALLOWS_UNALIGNED
	const uint32_t *wkey = (uint32_t*)key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const uint32_t *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                   key : strcpy(buf_aligned, key));
#endif
	/* SALT_SIZE ^ 3 compensates for GETPOS's per-byte endian flip so we
	 * land on the 32-bit word following the salt. */
	uint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETPOS(SALT_SIZE ^ 3, index)];
	unsigned int len;
	uint32_t temp;

	len = SALT_SIZE;
	/* Copy 4 bytes at a time until the NUL; place 0x80 right after it. */
	while((temp = *wkey++) & 0xff) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 16));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80U << 24));
			len+=2;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		keybuf_word += SIMD_COEF_32;
		if (!(temp & 0xff000000))
		{
			*keybuf_word = 0x80000000;
			len+=3;
			goto key_cleaning;
		}
		len += 4;
	}
	*keybuf_word = 0x00800000;

key_cleaning:
	/* Zero the rest of this lane left over from a longer previous key. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}

	len += 1; /* Trailing null is included */

	/* Word 15 of the block holds the message bit length. */
	((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
#else
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
#endif
}

/* Recover the plaintext from the SIMD buffer (inverse of set_key). */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned int i, s;
	static char out[PLAINTEXT_LENGTH + 1];

	/* Stored length is in bits and includes salt and trailing NUL. */
	s = (((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3) - SALT_SIZE - 1;
	for (i = 0; i < s; i++)
		out[i] = ((char*)saved_key)[GETPOS(SALT_SIZE + i, index)];
	out[i] = 0;
	return out;
#else
	return saved_key[index];
#endif
}

/* The salt is the 8 hex characters following the leading '1', kept as text
 * (NetScaler hashes the hex string itself, not decoded bytes). */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		uint32_t w;
	} out;

	ciphertext++;
	memcpy(out.c, ciphertext, SALT_SIZE);
	return (void*)out.c;
}

/* Write the salt into the first 8 bytes of every SIMD lane (scalar build
 * just keeps a copy; the salt is prepended at crypt time). */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int i, index;

	for (index = 0; index < kpc; index++)
		for (i = 0; i < SALT_SIZE; i++)
			saved_key[0][GETPOS(i, index)] = ((unsigned char*)salt)[i];
#else
	memcpy(saved_salt, salt, SALT_SIZE);
#endif
}

/* Quick scan: does any computed digest match binary on its first word? */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x, y=0;

	for (; y < kpc/SIMD_COEF_32; y++)
		for (x = 0; x < SIMD_COEF_32; x++) {
			if (((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5])
				return 1;
		}
	return 0;
#else
	int index = 0;
#ifdef _OPENMP
	/* Non-OMP scalar builds run one key per crypt, so checking index 0
	 * only (no loop) is sufficient there. */
	for (index = 0; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}

/* Full 5-word digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int x, y;

	x = index & (SIMD_COEF_32-1);
	y = (unsigned int)index / SIMD_COEF_32;

	if (((uint32_t*)binary)[0] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5])
		return 0;
	if (((uint32_t*)binary)[1] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*1])
		return 0;
	if (((uint32_t*)binary)[2] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*2])
		return 0;
	if (((uint32_t*)binary)[3] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*3])
		return 0;
	if (((uint32_t*)binary)[4] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*4])
		return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

/* cmp_one already compared the whole digest; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return (1);
}

/* Hash all staged keys: SHA1(salt_hex . password . NUL) per candidate. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
	/* One iteration per SIMD batch (or per key in scalar builds). */
	int loops =
(count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;

#pragma omp parallel for
	for (index = 0; index < loops; ++index)
#endif
	{
#ifdef SIMD_COEF_32
		/* Salt was already written into each lane by set_salt. */
		SIMDSHA1body(saved_key[index], (unsigned int*)crypt_key[index],
		             NULL, SSEi_MIXED_IN);
#else
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE);
		/* + 1: NetScaler hashes the password's trailing NUL too. */
		SHA1_Update(&ctx, (unsigned char*)saved_key[index],
		            strlen(saved_key[index]) + 1);
		SHA1_Final((unsigned char*)crypt_key[index], &ctx);
#endif
	}
	return count;
}

#ifdef SIMD_COEF_32
/* Index of digest word 0 for key `index` inside the interleaved output. */
#define HASH_IDX ((index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*5)
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif

/* The salt starts with hex text, so its first 4 bytes hash well as-is. */
static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}

/* Format registration: parameters block, then the method table. */
struct fmt_main fmt_ctrxns = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */